From 4fe1647c5151b7c7cf9d0b00f4f8ac2a5c6f53dc Mon Sep 17 00:00:00 2001 From: Aleksandar Himel Date: Mon, 3 Nov 2025 17:27:25 +0100 Subject: [PATCH 01/47] POC CNI and CNS changes to support IPv6 Secondary IPs. Additional work needed, draft only --- cni/network/invoker_cns.go | 56 +++++++++++ cni/network/network.go | 12 ++- cns/NetworkContainerContract.go | 1 + cns/restserver/ipam.go | 3 +- cns/restserver/util.go | 166 ++++++++++++++++++++++++++++++++ network/endpoint_windows.go | 5 + network/network_windows.go | 26 ++++- 7 files changed, 263 insertions(+), 6 deletions(-) diff --git a/cni/network/invoker_cns.go b/cni/network/invoker_cns.go index 928096b361..4c26c854b6 100644 --- a/cni/network/invoker_cns.go +++ b/cni/network/invoker_cns.go @@ -57,6 +57,7 @@ type IPResultInfo struct { routes []cns.Route pnpID string endpointPolicies []policy.Policy + secondaryIPs map[string]cns.SecondaryIPConfig } func (i IPResultInfo) MarshalLogObject(encoder zapcore.ObjectEncoder) error { @@ -162,6 +163,7 @@ func (invoker *CNSIPAMInvoker) Add(addConfig IPAMAddConfig) (IPAMAddResult, erro routes: response.PodIPInfo[i].Routes, pnpID: response.PodIPInfo[i].PnPID, endpointPolicies: response.PodIPInfo[i].EndpointPolicies, + secondaryIPs: response.PodIPInfo[i].SecondaryIPConfigs, } logger.Info("Received info for pod", @@ -505,9 +507,63 @@ func configureSecondaryAddResult(info *IPResultInfo, addResult *IPAMAddResult, p SkipDefaultRoutes: info.skipDefaultRoutes, } + if len(info.secondaryIPs) > 0 { + secIPConfig, err := BuildIPConfigForV6(info.secondaryIPs) + + if err == nil { + // If BuildIPConfigForV6 returns a value, take its address + ifaceInfo := addResult.interfaceInfo[key] + ifaceInfo.IPConfigs = append(ifaceInfo.IPConfigs, &secIPConfig) + addResult.interfaceInfo[key] = ifaceInfo + } + } + return nil } +// BuildIPConfigForV6 takes SecondaryIPConfigs and returns an IPConfig. +// Assumes map has at least one element and uses the first one found. 
+func BuildIPConfigForV6(secondaryIPs map[string]cns.SecondaryIPConfig) (network.IPConfig, error) { + for _, v := range secondaryIPs { + ip, ipNet, err := net.ParseCIDR(v.IPAddress) + if err != nil { + return network.IPConfig{}, fmt.Errorf("invalid IPAddress %q: %w", v.IPAddress, err) + } + if ip.To4() != nil { + return network.IPConfig{}, fmt.Errorf("expected IPv6, got IPv4: %q", v.IPAddress) + } + + // Preserve the original address/prefix (often /128) for the endpoint. + addr := *ipNet + + // Compute the gateway from the /64 network: + // If the parsed mask is /128, swap to /64 for the base; otherwise if already <= /64, use it. + ones, bits := ipNet.Mask.Size() + gwMask := ipNet.Mask + if ones > 64 { // e.g., /128 + gwMask = net.CIDRMask(64, bits) + } + + // Base = ip masked with /64 + base := ip.Mask(gwMask).To16() + if base == nil { + return network.IPConfig{}, fmt.Errorf("failed to get 16-byte IPv6 for %q", v.IPAddress) + } + + // Set gateway to ...:...:...:1 (i.e., last byte = 1) + gw := make(net.IP, len(base)) + copy(gw, base) + gw[15] = 0x01 // ::1 within that /64 + + return network.IPConfig{ + Address: addr, // original ipNet (likely /128) + Gateway: gw, // derived from /64 base + }, nil + } + + return network.IPConfig{}, fmt.Errorf("map is empty") +} + func addBackendNICToResult(info *IPResultInfo, addResult *IPAMAddResult, key string) error { macAddress, err := net.ParseMAC(info.macAddress) if err != nil { diff --git a/cni/network/network.go b/cni/network/network.go index 1ec1666f45..9b76db5cdc 100644 --- a/cni/network/network.go +++ b/cni/network/network.go @@ -567,6 +567,8 @@ func (plugin *NetPlugin) Add(args *cniSkel.CmdArgs) error { natInfo := getNATInfo(nwCfg, options[network.SNATIPKey], enableSnatForDNS) networkID, _ := plugin.getNetworkID(args.Netns, &ifInfo, nwCfg) + isIPv6 := ipamAddResult.ipv6Enabled + createEpInfoOpt := createEpInfoOpt{ nwCfg: nwCfg, cnsNetworkConfig: ifInfo.NCResponse, @@ -582,7 +584,7 @@ func (plugin *NetPlugin) Add(args 
*cniSkel.CmdArgs) error { networkID: networkID, ifInfo: &ifInfo, ipamAddConfig: &ipamAddConfig, - ipv6Enabled: ipamAddResult.ipv6Enabled, + ipv6Enabled: isIPv6, infraSeen: &infraSeen, endpointIndex: endpointIndex, } @@ -1411,11 +1413,15 @@ func convertInterfaceInfoToCniResult(info network.InterfaceInfo, ifName string) if len(info.IPConfigs) > 0 { for _, ipconfig := range info.IPConfigs { - result.IPs = append(result.IPs, &cniTypesCurr.IPConfig{Address: ipconfig.Address, Gateway: ipconfig.Gateway}) + if ipconfig.Address.IP.To4() != nil { + result.IPs = append(result.IPs, &cniTypesCurr.IPConfig{Address: ipconfig.Address, Gateway: ipconfig.Gateway}) + } } for i := range info.Routes { - result.Routes = append(result.Routes, &cniTypes.Route{Dst: info.Routes[i].Dst, GW: info.Routes[i].Gw}) + if info.Routes[i].Gw.To4() != nil { + result.Routes = append(result.Routes, &cniTypes.Route{Dst: info.Routes[i].Dst, GW: info.Routes[i].Gw}) + } } } diff --git a/cns/NetworkContainerContract.go b/cns/NetworkContainerContract.go index 8f5939c28e..ea0b6ef7e0 100644 --- a/cns/NetworkContainerContract.go +++ b/cns/NetworkContainerContract.go @@ -503,6 +503,7 @@ type GetNetworkContainerResponse struct { type PodIpInfo struct { PodIPConfig IPSubnet + SecondaryIPConfigs map[string]SecondaryIPConfig // uuid is key NetworkContainerPrimaryIPConfig IPConfiguration HostPrimaryIPInfo HostIPInfo NICType NICType diff --git a/cns/restserver/ipam.go b/cns/restserver/ipam.go index 7c1366149d..f1d6aa5ee1 100644 --- a/cns/restserver/ipam.go +++ b/cns/restserver/ipam.go @@ -137,7 +137,7 @@ func (service *HTTPRestService) requestIPConfigHandlerHelperStandalone(ctx conte // IMPORTANT: although SwiftV2 reuses the concept of NCs, NMAgent doesn't program NCs for SwiftV2, but // instead programs NICs. When getting SwiftV2 NCs, we want the NIC type and MAC address of the NCs. // TODO: we need another way to verify and sync NMAgent's NIC programming status. 
pending new NMAgent API or NIC programming status to be passed in the SwiftV2 create NC request. - resp := service.getAllNetworkContainerResponses(cnsRequest) //nolint:contextcheck // not passed in any methods, appease linter + resp, respCreateRequest := service.getAllNetworkContainerResponsesIPv6(cnsRequest) //nolint:contextcheck // not passed in any methods, appease linter // return err if returned list has no NCs if len(resp) == 0 { return &cns.IPConfigsResponse{ @@ -156,6 +156,7 @@ func (service *HTTPRestService) requestIPConfigHandlerHelperStandalone(ctx conte MacAddress: resp[i].NetworkInterfaceInfo.MACAddress, NICType: resp[i].NetworkInterfaceInfo.NICType, NetworkContainerPrimaryIPConfig: resp[i].IPConfiguration, + SecondaryIPConfigs: respCreateRequest[i].SecondaryIPConfigs, } podIPInfoList = append(podIPInfoList, podIPInfo) } diff --git a/cns/restserver/util.go b/cns/restserver/util.go index a84eb8cef0..1de7aed61d 100644 --- a/cns/restserver/util.go +++ b/cns/restserver/util.go @@ -550,6 +550,172 @@ func (service *HTTPRestService) getAllNetworkContainerResponses( return getNetworkContainersResponse } +// Copy of above function as I can't easly change the GetNetworkContainerResponse, too many dependancies +func (service *HTTPRestService) getAllNetworkContainerResponsesIPv6( + req cns.GetNetworkContainerRequest, +) ([]cns.GetNetworkContainerResponse, []cns.CreateNetworkContainerRequest) { + var ( + getNetworkContainerResponse cns.GetNetworkContainerResponse + ncs []string + skipNCVersionCheck = false + ) + + service.Lock() + defer service.Unlock() + + switch service.state.OrchestratorType { + case cns.Kubernetes, cns.ServiceFabric, cns.Batch, cns.DBforPostgreSQL, cns.AzureFirstParty: + podInfo, err := cns.UnmarshalPodInfo(req.OrchestratorContext) + getNetworkContainersResponse := []cns.GetNetworkContainerResponse{} + + if err != nil { + response := cns.Response{ + ReturnCode: types.UnexpectedError, + Message: fmt.Sprintf("Unmarshalling orchestrator context 
failed with error %v", err), + } + + getNetworkContainerResponse.Response = response + getNetworkContainersResponse = append(getNetworkContainersResponse, getNetworkContainerResponse) + return getNetworkContainersResponse, nil + } + + // get networkContainerIDs as string, "nc1, nc2" + orchestratorContext := podInfo.Name() + podInfo.Namespace() + if service.state.ContainerIDByOrchestratorContext[orchestratorContext] != nil { + ncs = strings.Split(string(*service.state.ContainerIDByOrchestratorContext[orchestratorContext]), ",") + } + + // This indicates that there are no ncs for the given orchestrator context + if len(ncs) == 0 { + response := cns.Response{ + ReturnCode: types.UnknownContainerID, + Message: fmt.Sprintf("Failed to find networkContainerID for orchestratorContext %s", orchestratorContext), + } + + getNetworkContainerResponse.Response = response + getNetworkContainersResponse = append(getNetworkContainersResponse, getNetworkContainerResponse) + return getNetworkContainersResponse, nil + } + + ctx, cancel := context.WithTimeout(context.Background(), nmaAPICallTimeout) + defer cancel() + ncVersionListResp, err := service.nma.GetNCVersionList(ctx) + if err != nil { + skipNCVersionCheck = true + logger.Errorf("failed to get nc version list from nmagent") + // TODO: Add telemetry as this has potential to have containers in the running state w/o datapath working + } + nmaNCs := map[string]string{} + for _, nc := range ncVersionListResp.Containers { + // store nmaNCID as lower case to allow case insensitive comparison with nc stored in CNS + nmaNCs[strings.TrimPrefix(lowerCaseNCGuid(nc.NetworkContainerID), cns.SwiftPrefix)] = nc.Version + } + + if !skipNCVersionCheck { + for _, ncid := range ncs { + waitingForUpdate := false + // If the goal state is available with CNS, check if the NC is pending VFP programming + waitingForUpdate, getNetworkContainerResponse.Response.ReturnCode, getNetworkContainerResponse.Response.Message = 
service.isNCWaitingForUpdate(service.state.ContainerStatus[ncid].CreateNetworkContainerRequest.Version, ncid, nmaNCs) //nolint:lll // bad code + // If the return code is not success, return the error to the caller + if getNetworkContainerResponse.Response.ReturnCode == types.NetworkContainerVfpProgramPending { + logger.Errorf("[Azure-CNS] isNCWaitingForUpdate failed for NCID: %s", ncid) + } + + vfpUpdateComplete := !waitingForUpdate + ncstatus := service.state.ContainerStatus[ncid] + // Update the container status if- + // 1. VfpUpdateCompleted successfully + // 2. VfpUpdateComplete changed to false + if (getNetworkContainerResponse.Response.ReturnCode == types.NetworkContainerVfpProgramComplete && + vfpUpdateComplete && ncstatus.VfpUpdateComplete != vfpUpdateComplete) || + (!vfpUpdateComplete && ncstatus.VfpUpdateComplete != vfpUpdateComplete) { + logger.Printf("[Azure-CNS] Setting VfpUpdateComplete to %t for NCID: %s", vfpUpdateComplete, ncid) + ncstatus.VfpUpdateComplete = vfpUpdateComplete + service.state.ContainerStatus[ncid] = ncstatus + if err = service.saveState(); err != nil { + logger.Errorf("Failed to save goal states for nc %+v due to %s", getNetworkContainerResponse, err) + } + } + } + } + + if service.ChannelMode == cns.Managed { + // If the NC goal state doesn't exist in CNS running in managed mode, call DNC to retrieve the goal state + var ( + dncEP = service.GetOption(acn.OptPrivateEndpoint).(string) + infraVnet = service.GetOption(acn.OptInfrastructureNetworkID).(string) + nodeID = service.GetOption(acn.OptNodeID).(string) + ) + + service.Unlock() + getNetworkContainerResponse.Response.ReturnCode, getNetworkContainerResponse.Response.Message = service.SyncNodeStatus(dncEP, infraVnet, nodeID, req.OrchestratorContext) + service.Lock() + if getNetworkContainerResponse.Response.ReturnCode == types.NotFound { + getNetworkContainersResponse = append(getNetworkContainersResponse, getNetworkContainerResponse) + return getNetworkContainersResponse, nil + } 
+ } + default: + getNetworkContainersResponse := []cns.GetNetworkContainerResponse{} + response := cns.Response{ + ReturnCode: types.UnsupportedOrchestratorType, + Message: fmt.Sprintf("Invalid orchestrator type %v", service.state.OrchestratorType), + } + + getNetworkContainerResponse.Response = response + getNetworkContainersResponse = append(getNetworkContainersResponse, getNetworkContainerResponse) + return getNetworkContainersResponse, nil + } + + getNetworkContainersResponse := []cns.GetNetworkContainerResponse{} + getCreateNetworkContainersRequest := []cns.CreateNetworkContainerRequest{} + + for _, ncid := range ncs { + containerStatus := service.state.ContainerStatus + containerDetails, ok := containerStatus[ncid] + if !ok { + response := cns.Response{ + ReturnCode: types.UnknownContainerID, + Message: "NetworkContainer doesn't exist.", + } + + getNetworkContainerResponse.Response = response + getNetworkContainersResponse = append(getNetworkContainersResponse, getNetworkContainerResponse) + continue + } + + savedReq := containerDetails.CreateNetworkContainerRequest + getNetworkContainerResponse = cns.GetNetworkContainerResponse{ + NetworkContainerID: savedReq.NetworkContainerid, + IPConfiguration: savedReq.IPConfiguration, + Routes: savedReq.Routes, + CnetAddressSpace: savedReq.CnetAddressSpace, + MultiTenancyInfo: savedReq.MultiTenancyInfo, + PrimaryInterfaceIdentifier: savedReq.PrimaryInterfaceIdentifier, + LocalIPConfiguration: savedReq.LocalIPConfiguration, + AllowHostToNCCommunication: savedReq.AllowHostToNCCommunication, + AllowNCToHostCommunication: savedReq.AllowNCToHostCommunication, + NetworkInterfaceInfo: savedReq.NetworkInterfaceInfo, + } + + // If the NC version check wasn't skipped, take into account the VFP programming status when returning the response + if !skipNCVersionCheck { + if !containerDetails.VfpUpdateComplete { + getNetworkContainerResponse.Response = cns.Response{ + ReturnCode: types.NetworkContainerVfpProgramPending, + Message: 
"NetworkContainer VFP programming is pending", + } + } + } + getNetworkContainersResponse = append(getNetworkContainersResponse, getNetworkContainerResponse) + getCreateNetworkContainersRequest = append(getCreateNetworkContainersRequest, savedReq) + } + + logger.Printf("getNetworkContainersResponses are %+v", getNetworkContainersResponse) + + return getNetworkContainersResponse, getCreateNetworkContainersRequest +} + // restoreNetworkState restores Network state that existed before reboot. func (service *HTTPRestService) restoreNetworkState() error { logger.Printf("[Azure CNS] Enter Restoring Network State") diff --git a/network/endpoint_windows.go b/network/endpoint_windows.go index edd52327f2..d0478f6ff8 100644 --- a/network/endpoint_windows.go +++ b/network/endpoint_windows.go @@ -343,6 +343,11 @@ func (nw *network) configureHcnEndpoint(epInfo *EndpointInfo) (*hcn.HostComputeE for _, ipAddress := range epInfo.IPAddresses { prefixLength, _ := ipAddress.Mask.Size() + + if ipAddress.IP.To4() == nil { + prefixLength = 64 + } + ipConfiguration := hcn.IpConfig{ IpAddress: ipAddress.IP.String(), PrefixLength: uint8(prefixLength), diff --git a/network/network_windows.go b/network/network_windows.go index a467b20983..ce2f0fe045 100644 --- a/network/network_windows.go +++ b/network/network_windows.go @@ -6,6 +6,7 @@ package network import ( "encoding/json" "fmt" + "net" "strconv" "strings" "time" @@ -289,13 +290,34 @@ func (nm *networkManager) configureHcnNetwork(nwInfo *EndpointInfo, extIf *exter // Populate subnets. 
for _, subnet := range nwInfo.Subnets { + + prefix := subnet.Prefix + if prefix.IP.To4() == nil { + // IPv6: normalize to /64 + prefix.Mask = net.CIDRMask(64, 128) + prefix.IP = prefix.IP.Mask(prefix.Mask) // zero out host bits + } + prefixStr := prefix.String() // e.g., fd00:da04:74ff:0::/64 + + // Check if it's IPv6 + if subnet.Prefix.IP.To4() == nil { + // IPv6: replace /128 with /64 if present + prefixStr = strings.Replace(prefixStr, "/128", "/64", 1) + } + + // Choose route based on IP family + routeDest := defaultRouteCIDR + if subnet.Prefix.IP.To4() == nil { + routeDest = defaultIPv6Route + } + hnsSubnet := hcn.Subnet{ - IpAddressPrefix: subnet.Prefix.String(), + IpAddressPrefix: prefixStr, // Set the Gateway route Routes: []hcn.Route{ { NextHop: subnet.Gateway.String(), - DestinationPrefix: defaultRouteCIDR, + DestinationPrefix: routeDest, }, }, } From d6e3480a38ff1b63ec26f8e5ec27d1a7af74a267 Mon Sep 17 00:00:00 2001 From: Aleksandar Himel Date: Thu, 20 Nov 2025 19:38:06 +0100 Subject: [PATCH 02/47] Cleaning up all the changes to keep only minimal needed for POC to work. Parts of logic moved to DNC to align how it is done for generic IPv4 case as well. 
--- cni/network/invoker_cns.go | 128 ++++++++++++------------ cni/network/network.go | 12 +-- cns/NetworkContainerContract.go | 1 + cns/restserver/ipam.go | 4 +- cns/restserver/util.go | 167 +------------------------------- network/endpoint_windows.go | 4 - network/network_windows.go | 18 +--- 7 files changed, 68 insertions(+), 266 deletions(-) diff --git a/cni/network/invoker_cns.go b/cni/network/invoker_cns.go index 4c26c854b6..2754b2d886 100644 --- a/cni/network/invoker_cns.go +++ b/cni/network/invoker_cns.go @@ -44,20 +44,21 @@ type CNSIPAMInvoker struct { } type IPResultInfo struct { - podIPAddress string - ncSubnetPrefix uint8 - ncPrimaryIP string - ncGatewayIPAddress string - hostSubnet string - hostPrimaryIP string - hostGateway string - nicType cns.NICType - macAddress string - skipDefaultRoutes bool - routes []cns.Route - pnpID string - endpointPolicies []policy.Policy - secondaryIPs map[string]cns.SecondaryIPConfig + podIPAddress string + ncSubnetPrefix uint8 + ncPrimaryIP string + ncGatewayIPAddress string + ncGatewayIPv6Address string + hostSubnet string + hostPrimaryIP string + hostGateway string + nicType cns.NICType + macAddress string + skipDefaultRoutes bool + routes []cns.Route + pnpID string + endpointPolicies []policy.Policy + secondaryIPs map[string]cns.SecondaryIPConfig } func (i IPResultInfo) MarshalLogObject(encoder zapcore.ObjectEncoder) error { @@ -150,20 +151,21 @@ func (invoker *CNSIPAMInvoker) Add(addConfig IPAMAddConfig) (IPAMAddResult, erro for i := 0; i < len(response.PodIPInfo); i++ { info := IPResultInfo{ - podIPAddress: response.PodIPInfo[i].PodIPConfig.IPAddress, - ncSubnetPrefix: response.PodIPInfo[i].NetworkContainerPrimaryIPConfig.IPSubnet.PrefixLength, - ncPrimaryIP: response.PodIPInfo[i].NetworkContainerPrimaryIPConfig.IPSubnet.IPAddress, - ncGatewayIPAddress: response.PodIPInfo[i].NetworkContainerPrimaryIPConfig.GatewayIPAddress, - hostSubnet: response.PodIPInfo[i].HostPrimaryIPInfo.Subnet, - hostPrimaryIP: 
response.PodIPInfo[i].HostPrimaryIPInfo.PrimaryIP, - hostGateway: response.PodIPInfo[i].HostPrimaryIPInfo.Gateway, - nicType: response.PodIPInfo[i].NICType, - macAddress: response.PodIPInfo[i].MacAddress, - skipDefaultRoutes: response.PodIPInfo[i].SkipDefaultRoutes, - routes: response.PodIPInfo[i].Routes, - pnpID: response.PodIPInfo[i].PnPID, - endpointPolicies: response.PodIPInfo[i].EndpointPolicies, - secondaryIPs: response.PodIPInfo[i].SecondaryIPConfigs, + podIPAddress: response.PodIPInfo[i].PodIPConfig.IPAddress, + ncSubnetPrefix: response.PodIPInfo[i].NetworkContainerPrimaryIPConfig.IPSubnet.PrefixLength, + ncPrimaryIP: response.PodIPInfo[i].NetworkContainerPrimaryIPConfig.IPSubnet.IPAddress, + ncGatewayIPAddress: response.PodIPInfo[i].NetworkContainerPrimaryIPConfig.GatewayIPAddress, + ncGatewayIPv6Address: response.PodIPInfo[i].NetworkContainerPrimaryIPConfig.GatewayIPv6Address, + hostSubnet: response.PodIPInfo[i].HostPrimaryIPInfo.Subnet, + hostPrimaryIP: response.PodIPInfo[i].HostPrimaryIPInfo.PrimaryIP, + hostGateway: response.PodIPInfo[i].HostPrimaryIPInfo.Gateway, + nicType: response.PodIPInfo[i].NICType, + macAddress: response.PodIPInfo[i].MacAddress, + skipDefaultRoutes: response.PodIPInfo[i].SkipDefaultRoutes, + routes: response.PodIPInfo[i].Routes, + pnpID: response.PodIPInfo[i].PnPID, + endpointPolicies: response.PodIPInfo[i].EndpointPolicies, + secondaryIPs: response.PodIPInfo[i].SecondaryIPConfigs, } logger.Info("Received info for pod", @@ -508,7 +510,8 @@ func configureSecondaryAddResult(info *IPResultInfo, addResult *IPAMAddResult, p } if len(info.secondaryIPs) > 0 { - secIPConfig, err := BuildIPConfigForV6(info.secondaryIPs) + // assumtion that first address is the only important one and that it is IPv6 + secIPConfig, err := BuildIPConfigForV6(info.secondaryIPs, info.ncGatewayIPv6Address) if err == nil { // If BuildIPConfigForV6 returns a value, take its address @@ -523,45 +526,34 @@ func configureSecondaryAddResult(info *IPResultInfo, 
addResult *IPAMAddResult, p // BuildIPConfigForV6 takes SecondaryIPConfigs and returns an IPConfig. // Assumes map has at least one element and uses the first one found. -func BuildIPConfigForV6(secondaryIPs map[string]cns.SecondaryIPConfig) (network.IPConfig, error) { - for _, v := range secondaryIPs { - ip, ipNet, err := net.ParseCIDR(v.IPAddress) - if err != nil { - return network.IPConfig{}, fmt.Errorf("invalid IPAddress %q: %w", v.IPAddress, err) - } - if ip.To4() != nil { - return network.IPConfig{}, fmt.Errorf("expected IPv6, got IPv4: %q", v.IPAddress) - } - - // Preserve the original address/prefix (often /128) for the endpoint. - addr := *ipNet - - // Compute the gateway from the /64 network: - // If the parsed mask is /128, swap to /64 for the base; otherwise if already <= /64, use it. - ones, bits := ipNet.Mask.Size() - gwMask := ipNet.Mask - if ones > 64 { // e.g., /128 - gwMask = net.CIDRMask(64, bits) - } - - // Base = ip masked with /64 - base := ip.Mask(gwMask).To16() - if base == nil { - return network.IPConfig{}, fmt.Errorf("failed to get 16-byte IPv6 for %q", v.IPAddress) - } - - // Set gateway to ...:...:...:1 (i.e., last byte = 1) - gw := make(net.IP, len(base)) - copy(gw, base) - gw[15] = 0x01 // ::1 within that /64 - - return network.IPConfig{ - Address: addr, // original ipNet (likely /128) - Gateway: gw, // derived from /64 base - }, nil - } - - return network.IPConfig{}, fmt.Errorf("map is empty") +func BuildIPConfigForV6(secondaryIPs map[string]cns.SecondaryIPConfig, gatewayIPv6 string) (network.IPConfig, error) { + for _, v := range secondaryIPs { + ip, ipnet, err := net.ParseCIDR(v.IPAddress) + if err != nil { + return network.IPConfig{}, fmt.Errorf("invalid IPAddress %q: %w", v.IPAddress, err) + } + if ip.To4() != nil { + return network.IPConfig{}, fmt.Errorf("expected IPv6, got IPv4: %q", v.IPAddress) + } + + gwIP := net.ParseIP(gatewayIPv6) + if gwIP == nil { + return network.IPConfig{}, fmt.Errorf("invalid Gateway IPAddress %q: 
%w", gatewayIPv6, err) + } + if gwIP.To4() != nil { + return network.IPConfig{}, fmt.Errorf("expected IPv6 Gateway, got IPv4 Gateway: %q", gatewayIPv6) + } + + return network.IPConfig{ + Address: net.IPNet{ + IP: ip, + Mask: ipnet.Mask, + }, + Gateway: gwIP, // derived from /64 base + }, nil + } + + return network.IPConfig{}, fmt.Errorf("map is empty") } func addBackendNICToResult(info *IPResultInfo, addResult *IPAMAddResult, key string) error { diff --git a/cni/network/network.go b/cni/network/network.go index 9b76db5cdc..1ec1666f45 100644 --- a/cni/network/network.go +++ b/cni/network/network.go @@ -567,8 +567,6 @@ func (plugin *NetPlugin) Add(args *cniSkel.CmdArgs) error { natInfo := getNATInfo(nwCfg, options[network.SNATIPKey], enableSnatForDNS) networkID, _ := plugin.getNetworkID(args.Netns, &ifInfo, nwCfg) - isIPv6 := ipamAddResult.ipv6Enabled - createEpInfoOpt := createEpInfoOpt{ nwCfg: nwCfg, cnsNetworkConfig: ifInfo.NCResponse, @@ -584,7 +582,7 @@ func (plugin *NetPlugin) Add(args *cniSkel.CmdArgs) error { networkID: networkID, ifInfo: &ifInfo, ipamAddConfig: &ipamAddConfig, - ipv6Enabled: isIPv6, + ipv6Enabled: ipamAddResult.ipv6Enabled, infraSeen: &infraSeen, endpointIndex: endpointIndex, } @@ -1413,15 +1411,11 @@ func convertInterfaceInfoToCniResult(info network.InterfaceInfo, ifName string) if len(info.IPConfigs) > 0 { for _, ipconfig := range info.IPConfigs { - if ipconfig.Address.IP.To4() != nil { - result.IPs = append(result.IPs, &cniTypesCurr.IPConfig{Address: ipconfig.Address, Gateway: ipconfig.Gateway}) - } + result.IPs = append(result.IPs, &cniTypesCurr.IPConfig{Address: ipconfig.Address, Gateway: ipconfig.Gateway}) } for i := range info.Routes { - if info.Routes[i].Gw.To4() != nil { - result.Routes = append(result.Routes, &cniTypes.Route{Dst: info.Routes[i].Dst, GW: info.Routes[i].Gw}) - } + result.Routes = append(result.Routes, &cniTypes.Route{Dst: info.Routes[i].Dst, GW: info.Routes[i].Gw}) } } diff --git a/cns/NetworkContainerContract.go 
b/cns/NetworkContainerContract.go index ea0b6ef7e0..1795bb607b 100644 --- a/cns/NetworkContainerContract.go +++ b/cns/NetworkContainerContract.go @@ -490,6 +490,7 @@ type GetNetworkContainerRequest struct { type GetNetworkContainerResponse struct { NetworkContainerID string IPConfiguration IPConfiguration + SecondaryIPConfigs map[string]SecondaryIPConfig // uuid is key Routes []Route CnetAddressSpace []IPSubnet MultiTenancyInfo MultiTenancyInfo diff --git a/cns/restserver/ipam.go b/cns/restserver/ipam.go index f1d6aa5ee1..cbc663e9e9 100644 --- a/cns/restserver/ipam.go +++ b/cns/restserver/ipam.go @@ -137,7 +137,7 @@ func (service *HTTPRestService) requestIPConfigHandlerHelperStandalone(ctx conte // IMPORTANT: although SwiftV2 reuses the concept of NCs, NMAgent doesn't program NCs for SwiftV2, but // instead programs NICs. When getting SwiftV2 NCs, we want the NIC type and MAC address of the NCs. // TODO: we need another way to verify and sync NMAgent's NIC programming status. pending new NMAgent API or NIC programming status to be passed in the SwiftV2 create NC request. 
- resp, respCreateRequest := service.getAllNetworkContainerResponsesIPv6(cnsRequest) //nolint:contextcheck // not passed in any methods, appease linter + resp := service.getAllNetworkContainerResponses(cnsRequest) //nolint:contextcheck // not passed in any methods, appease linter // return err if returned list has no NCs if len(resp) == 0 { return &cns.IPConfigsResponse{ @@ -156,7 +156,7 @@ func (service *HTTPRestService) requestIPConfigHandlerHelperStandalone(ctx conte MacAddress: resp[i].NetworkInterfaceInfo.MACAddress, NICType: resp[i].NetworkInterfaceInfo.NICType, NetworkContainerPrimaryIPConfig: resp[i].IPConfiguration, - SecondaryIPConfigs: respCreateRequest[i].SecondaryIPConfigs, + SecondaryIPConfigs: resp[i].SecondaryIPConfigs, } podIPInfoList = append(podIPInfoList, podIPInfo) } diff --git a/cns/restserver/util.go b/cns/restserver/util.go index 1de7aed61d..e95d3c535b 100644 --- a/cns/restserver/util.go +++ b/cns/restserver/util.go @@ -523,6 +523,7 @@ func (service *HTTPRestService) getAllNetworkContainerResponses( getNetworkContainerResponse = cns.GetNetworkContainerResponse{ NetworkContainerID: savedReq.NetworkContainerid, IPConfiguration: savedReq.IPConfiguration, + SecondaryIPConfigs: savedReq.SecondaryIPConfigs, Routes: savedReq.Routes, CnetAddressSpace: savedReq.CnetAddressSpace, MultiTenancyInfo: savedReq.MultiTenancyInfo, @@ -550,172 +551,6 @@ func (service *HTTPRestService) getAllNetworkContainerResponses( return getNetworkContainersResponse } -// Copy of above function as I can't easly change the GetNetworkContainerResponse, too many dependancies -func (service *HTTPRestService) getAllNetworkContainerResponsesIPv6( - req cns.GetNetworkContainerRequest, -) ([]cns.GetNetworkContainerResponse, []cns.CreateNetworkContainerRequest) { - var ( - getNetworkContainerResponse cns.GetNetworkContainerResponse - ncs []string - skipNCVersionCheck = false - ) - - service.Lock() - defer service.Unlock() - - switch service.state.OrchestratorType { - case 
cns.Kubernetes, cns.ServiceFabric, cns.Batch, cns.DBforPostgreSQL, cns.AzureFirstParty: - podInfo, err := cns.UnmarshalPodInfo(req.OrchestratorContext) - getNetworkContainersResponse := []cns.GetNetworkContainerResponse{} - - if err != nil { - response := cns.Response{ - ReturnCode: types.UnexpectedError, - Message: fmt.Sprintf("Unmarshalling orchestrator context failed with error %v", err), - } - - getNetworkContainerResponse.Response = response - getNetworkContainersResponse = append(getNetworkContainersResponse, getNetworkContainerResponse) - return getNetworkContainersResponse, nil - } - - // get networkContainerIDs as string, "nc1, nc2" - orchestratorContext := podInfo.Name() + podInfo.Namespace() - if service.state.ContainerIDByOrchestratorContext[orchestratorContext] != nil { - ncs = strings.Split(string(*service.state.ContainerIDByOrchestratorContext[orchestratorContext]), ",") - } - - // This indicates that there are no ncs for the given orchestrator context - if len(ncs) == 0 { - response := cns.Response{ - ReturnCode: types.UnknownContainerID, - Message: fmt.Sprintf("Failed to find networkContainerID for orchestratorContext %s", orchestratorContext), - } - - getNetworkContainerResponse.Response = response - getNetworkContainersResponse = append(getNetworkContainersResponse, getNetworkContainerResponse) - return getNetworkContainersResponse, nil - } - - ctx, cancel := context.WithTimeout(context.Background(), nmaAPICallTimeout) - defer cancel() - ncVersionListResp, err := service.nma.GetNCVersionList(ctx) - if err != nil { - skipNCVersionCheck = true - logger.Errorf("failed to get nc version list from nmagent") - // TODO: Add telemetry as this has potential to have containers in the running state w/o datapath working - } - nmaNCs := map[string]string{} - for _, nc := range ncVersionListResp.Containers { - // store nmaNCID as lower case to allow case insensitive comparison with nc stored in CNS - 
nmaNCs[strings.TrimPrefix(lowerCaseNCGuid(nc.NetworkContainerID), cns.SwiftPrefix)] = nc.Version - } - - if !skipNCVersionCheck { - for _, ncid := range ncs { - waitingForUpdate := false - // If the goal state is available with CNS, check if the NC is pending VFP programming - waitingForUpdate, getNetworkContainerResponse.Response.ReturnCode, getNetworkContainerResponse.Response.Message = service.isNCWaitingForUpdate(service.state.ContainerStatus[ncid].CreateNetworkContainerRequest.Version, ncid, nmaNCs) //nolint:lll // bad code - // If the return code is not success, return the error to the caller - if getNetworkContainerResponse.Response.ReturnCode == types.NetworkContainerVfpProgramPending { - logger.Errorf("[Azure-CNS] isNCWaitingForUpdate failed for NCID: %s", ncid) - } - - vfpUpdateComplete := !waitingForUpdate - ncstatus := service.state.ContainerStatus[ncid] - // Update the container status if- - // 1. VfpUpdateCompleted successfully - // 2. VfpUpdateComplete changed to false - if (getNetworkContainerResponse.Response.ReturnCode == types.NetworkContainerVfpProgramComplete && - vfpUpdateComplete && ncstatus.VfpUpdateComplete != vfpUpdateComplete) || - (!vfpUpdateComplete && ncstatus.VfpUpdateComplete != vfpUpdateComplete) { - logger.Printf("[Azure-CNS] Setting VfpUpdateComplete to %t for NCID: %s", vfpUpdateComplete, ncid) - ncstatus.VfpUpdateComplete = vfpUpdateComplete - service.state.ContainerStatus[ncid] = ncstatus - if err = service.saveState(); err != nil { - logger.Errorf("Failed to save goal states for nc %+v due to %s", getNetworkContainerResponse, err) - } - } - } - } - - if service.ChannelMode == cns.Managed { - // If the NC goal state doesn't exist in CNS running in managed mode, call DNC to retrieve the goal state - var ( - dncEP = service.GetOption(acn.OptPrivateEndpoint).(string) - infraVnet = service.GetOption(acn.OptInfrastructureNetworkID).(string) - nodeID = service.GetOption(acn.OptNodeID).(string) - ) - - service.Unlock() - 
getNetworkContainerResponse.Response.ReturnCode, getNetworkContainerResponse.Response.Message = service.SyncNodeStatus(dncEP, infraVnet, nodeID, req.OrchestratorContext) - service.Lock() - if getNetworkContainerResponse.Response.ReturnCode == types.NotFound { - getNetworkContainersResponse = append(getNetworkContainersResponse, getNetworkContainerResponse) - return getNetworkContainersResponse, nil - } - } - default: - getNetworkContainersResponse := []cns.GetNetworkContainerResponse{} - response := cns.Response{ - ReturnCode: types.UnsupportedOrchestratorType, - Message: fmt.Sprintf("Invalid orchestrator type %v", service.state.OrchestratorType), - } - - getNetworkContainerResponse.Response = response - getNetworkContainersResponse = append(getNetworkContainersResponse, getNetworkContainerResponse) - return getNetworkContainersResponse, nil - } - - getNetworkContainersResponse := []cns.GetNetworkContainerResponse{} - getCreateNetworkContainersRequest := []cns.CreateNetworkContainerRequest{} - - for _, ncid := range ncs { - containerStatus := service.state.ContainerStatus - containerDetails, ok := containerStatus[ncid] - if !ok { - response := cns.Response{ - ReturnCode: types.UnknownContainerID, - Message: "NetworkContainer doesn't exist.", - } - - getNetworkContainerResponse.Response = response - getNetworkContainersResponse = append(getNetworkContainersResponse, getNetworkContainerResponse) - continue - } - - savedReq := containerDetails.CreateNetworkContainerRequest - getNetworkContainerResponse = cns.GetNetworkContainerResponse{ - NetworkContainerID: savedReq.NetworkContainerid, - IPConfiguration: savedReq.IPConfiguration, - Routes: savedReq.Routes, - CnetAddressSpace: savedReq.CnetAddressSpace, - MultiTenancyInfo: savedReq.MultiTenancyInfo, - PrimaryInterfaceIdentifier: savedReq.PrimaryInterfaceIdentifier, - LocalIPConfiguration: savedReq.LocalIPConfiguration, - AllowHostToNCCommunication: savedReq.AllowHostToNCCommunication, - AllowNCToHostCommunication: 
savedReq.AllowNCToHostCommunication, - NetworkInterfaceInfo: savedReq.NetworkInterfaceInfo, - } - - // If the NC version check wasn't skipped, take into account the VFP programming status when returning the response - if !skipNCVersionCheck { - if !containerDetails.VfpUpdateComplete { - getNetworkContainerResponse.Response = cns.Response{ - ReturnCode: types.NetworkContainerVfpProgramPending, - Message: "NetworkContainer VFP programming is pending", - } - } - } - getNetworkContainersResponse = append(getNetworkContainersResponse, getNetworkContainerResponse) - getCreateNetworkContainersRequest = append(getCreateNetworkContainersRequest, savedReq) - } - - logger.Printf("getNetworkContainersResponses are %+v", getNetworkContainersResponse) - - return getNetworkContainersResponse, getCreateNetworkContainersRequest -} - // restoreNetworkState restores Network state that existed before reboot. func (service *HTTPRestService) restoreNetworkState() error { logger.Printf("[Azure CNS] Enter Restoring Network State") diff --git a/network/endpoint_windows.go b/network/endpoint_windows.go index d0478f6ff8..6c3536383d 100644 --- a/network/endpoint_windows.go +++ b/network/endpoint_windows.go @@ -344,10 +344,6 @@ func (nw *network) configureHcnEndpoint(epInfo *EndpointInfo) (*hcn.HostComputeE for _, ipAddress := range epInfo.IPAddresses { prefixLength, _ := ipAddress.Mask.Size() - if ipAddress.IP.To4() == nil { - prefixLength = 64 - } - ipConfiguration := hcn.IpConfig{ IpAddress: ipAddress.IP.String(), PrefixLength: uint8(prefixLength), diff --git a/network/network_windows.go b/network/network_windows.go index ce2f0fe045..3107800eed 100644 --- a/network/network_windows.go +++ b/network/network_windows.go @@ -6,7 +6,6 @@ package network import ( "encoding/json" "fmt" - "net" "strconv" "strings" "time" @@ -290,21 +289,6 @@ func (nm *networkManager) configureHcnNetwork(nwInfo *EndpointInfo, extIf *exter // Populate subnets. 
for _, subnet := range nwInfo.Subnets { - - prefix := subnet.Prefix - if prefix.IP.To4() == nil { - // IPv6: normalize to /64 - prefix.Mask = net.CIDRMask(64, 128) - prefix.IP = prefix.IP.Mask(prefix.Mask) // zero out host bits - } - prefixStr := prefix.String() // e.g., fd00:da04:74ff:0::/64 - - // Check if it's IPv6 - if subnet.Prefix.IP.To4() == nil { - // IPv6: replace /128 with /64 if present - prefixStr = strings.Replace(prefixStr, "/128", "/64", 1) - } - // Choose route based on IP family routeDest := defaultRouteCIDR if subnet.Prefix.IP.To4() == nil { @@ -312,7 +296,7 @@ func (nm *networkManager) configureHcnNetwork(nwInfo *EndpointInfo, extIf *exter } hnsSubnet := hcn.Subnet{ - IpAddressPrefix: prefixStr, + IpAddressPrefix: subnet.Prefix.String(), // Set the Gateway route Routes: []hcn.Route{ { From 6c23bccd445ff43cc8c6ef940f102c8dbbc707db Mon Sep 17 00:00:00 2001 From: Aleksandar Himel Date: Thu, 20 Nov 2025 19:40:59 +0100 Subject: [PATCH 03/47] reverting extra nl added by mistake --- network/endpoint_windows.go | 1 - 1 file changed, 1 deletion(-) diff --git a/network/endpoint_windows.go b/network/endpoint_windows.go index 6c3536383d..edd52327f2 100644 --- a/network/endpoint_windows.go +++ b/network/endpoint_windows.go @@ -343,7 +343,6 @@ func (nw *network) configureHcnEndpoint(epInfo *EndpointInfo) (*hcn.HostComputeE for _, ipAddress := range epInfo.IPAddresses { prefixLength, _ := ipAddress.Mask.Size() - ipConfiguration := hcn.IpConfig{ IpAddress: ipAddress.IP.String(), PrefixLength: uint8(prefixLength), From b1aa9d9a4399c26fea7e01325c85429c17af6498 Mon Sep 17 00:00:00 2001 From: shreyashastantram <105284415+shreyashastantram@users.noreply.github.com> Date: Mon, 13 Oct 2025 15:41:44 -0700 Subject: [PATCH 04/47] Extending the MTPNC status (#4058) * Extending the MTPNC status * Adding generated crd * Adding to poduid to Spec * Adding crd changes * Update crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go Co-authored-by: Copilot 
<175728472+Copilot@users.noreply.github.com> Signed-off-by: shreyashastantram <105284415+shreyashastantram@users.noreply.github.com> * Update crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: shreyashastantram <105284415+shreyashastantram@users.noreply.github.com> --------- Signed-off-by: shreyashastantram <105284415+shreyashastantram@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../api/v1alpha1/multitenantpodnetworkconfig.go | 5 +++++ ...itenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go b/crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go index c217f2dafa..f1f76b5d4c 100644 --- a/crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go +++ b/crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go @@ -48,6 +48,8 @@ type MultitenantPodNetworkConfigSpec struct { // MAC addresses of the IB devices to use for a pod // +kubebuilder:validation:Optional IBMACAddresses []string `json:"IBMACAddresses,omitempty"` + // PodUID is the UID of the pod + PodUID string `json:"podUID,omitempty"` } // +kubebuilder:validation:Enum=Unprogrammed;Programming;Programmed;Unprogramming;Failed @@ -105,6 +107,9 @@ type MultitenantPodNetworkConfigStatus struct { // DefaultDenyACL bool indicates whether default deny policy will be present on the pods upon pod creation // +kubebuilder:validation:Optional DefaultDenyACL bool `json:"defaultDenyACL"` + // NodeName is the name of the node where the pod is scheduled + // +kubebuilder:validation:Optional + NodeName string `json:"nodeName,omitempty"` } func init() { diff --git a/crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml 
b/crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml index 9f96d6a691..9d754a9773 100644 --- a/crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml +++ b/crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml @@ -70,6 +70,9 @@ spec: podNetworkInstance: description: name of PNI object from requesting cx pod type: string + podUID: + description: PodUID is the UID of the pod + type: string required: - podNetwork type: object @@ -135,6 +138,9 @@ spec: ncID: description: Deprecated - use InterfaceInfos type: string + nodeName: + description: NodeName is the name of the node where the pod is scheduled + type: string primaryIP: description: Deprecated - use InterfaceInfos type: string From 20b53683c4ea21cb07252d8d0996913590eccda1 Mon Sep 17 00:00:00 2001 From: Alexander <39818795+QxBytes@users.noreply.github.com> Date: Mon, 13 Oct 2025 15:51:14 -0700 Subject: [PATCH 05/47] ci: skip portforward in windows dualstack (#4076) * skip portforward in windows dualstack * modify ob pipeline to skip as well --- .../dualstack-overlay/dualstackoverlay-e2e-job-template.yaml | 2 +- .../dualstack-overlay/dualstackoverlay-e2e.stages.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml index 5302998d16..5fd8dae123 100644 --- a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml +++ b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml @@ -71,7 +71,7 @@ stages: dependsOn: ${{ parameters.name }}_${{ parameters.os }} dualstack: ${{ eq(parameters.os, 'linux') }} # RUN IN LINUX not WINDOWS Currently broken for scenario and blocking releases, HNS is investigating. 
Covered by go test in E2E step template dns: true - portforward: true + portforward: ${{ eq(parameters.os, 'linux') }} # RUN IN LINUX NOT WINDOWS Currently broken for scenario and blocking releases service: ${{ eq(parameters.os, 'linux') }} # RUN IN LINUX NOT WINDOWS Currently broken for scenario and blocking releases, HNS is investigating. hostport: true hybridWin: ${{ eq(parameters.os, 'windows') }} diff --git a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e.stages.yaml b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e.stages.yaml index 4b368ba77e..123f3d572d 100644 --- a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e.stages.yaml +++ b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e.stages.yaml @@ -76,7 +76,7 @@ stages: dependsOn: ${{ parameters.name }}_${{ parameters.os }} dualstack: ${{ eq(parameters.os, 'linux') }} # RUN IN LINUX not WINDOWS Currently broken for scenario and blocking releases, HNS is investigating. Covered by go test in E2E step template dns: true - portforward: true + portforward: ${{ eq(parameters.os, 'linux') }} # RUN IN LINUX NOT WINDOWS Currently broken for scenario and blocking releases service: ${{ eq(parameters.os, 'linux') }} # RUN IN LINUX NOT WINDOWS Currently broken for scenario and blocking releases, HNS is investigating. 
hostport: true hybridWin: ${{ eq(parameters.os, 'windows') }} From 914028034722206f6d2fb6fa5a524110ac03cc5b Mon Sep 17 00:00:00 2001 From: Alexander <39818795+QxBytes@users.noreply.github.com> Date: Mon, 13 Oct 2025 16:34:05 -0700 Subject: [PATCH 06/47] ci: add cilium ebpf e2e overlay and podsubnet test (#4073) * add initial ebpf overlay modifications * hardcode values for testing * add depends to delete * make ebpf use cilium v1.17 * hardcode cluster name for testing * export makefile variables * add container for cilium command * add default os sku without the default value for osSKU, it is set to the empty string which is not "undefined", and so the the makefile's ?= default value is not used either, leading to the os SKU in the makefile being "" * move pod cidr to variable * add ccnp to deploys to block wireserver * amend ccnp to not block imds yet * Revert "hardcode cluster name for testing" This reverts commit 5945c63317595d5ae8f9ec8ef1eab7e4da14f4b3. * Revert "hardcode values for testing" This reverts commit f3ddf71bc1475a253cb7b87d0a7e9a4144411290. 
* rename targets and files * apply crd before custom resource * test ebpf with test iptables monitor and merger images * begin templating- previous commit passed * remove hubble and nightly copied code * remove unused vars and rename target (noop) * test ebpf podsubnet * reuse existing template the only difference between the two is overlay has check that deletes the cilium test namespace and validates the state-- otherwise the cilium tests template is identical previous commit passed * increase timeout to 20 minutes * swap podsubnet to ubuntu 24 * register ubuntu 2404 preview feature --- .pipelines/pipeline.yaml | 35 +- .../cilium-ebpf/cilium-e2e-job-template.yaml | 89 + .../cilium-ebpf/cilium-e2e-step-template.yaml | 45 + .../cilium-overlay-e2e-job-template.yaml | 86 + .../cilium-overlay-e2e-step-template.yaml | 48 + .pipelines/templates/create-cluster.yaml | 3 +- .pipelines/templates/setup-environment.yaml | 22 + hack/aks/Makefile | 31 +- hack/aks/deploy.mk | 49 + .../common/allowed-iptables-patterns.yaml | 61 + .../cilium/v1.17/ebpf/common/ccnps.yaml | 19 + .../ebpf/common/cilium-agent-clusterrole.yaml | 125 + .../ciliumclusterwidenetworkpolicies.yaml | 7054 +++++++++++++++++ .../cilium/v1.17/ebpf/overlay/cilium.yaml | 538 ++ ...azure-ip-masq-agent-config-reconciled.yaml | 13 + .../ebpf/overlay/static/cilium-config.yaml | 166 + .../cilium/v1.17/ebpf/podsubnet/cilium.yaml | 508 ++ .../azure-dns-imds-ip-masq-agent-config.yaml | 26 + .../ebpf/podsubnet/static/cilium-config.yaml | 166 + test/validate/linux_validate.go | 1 + 20 files changed, 9077 insertions(+), 8 deletions(-) create mode 100644 .pipelines/singletenancy/cilium-ebpf/cilium-e2e-job-template.yaml create mode 100644 .pipelines/singletenancy/cilium-ebpf/cilium-e2e-step-template.yaml create mode 100644 .pipelines/singletenancy/cilium-overlay-ebpf/cilium-overlay-e2e-job-template.yaml create mode 100644 .pipelines/singletenancy/cilium-overlay-ebpf/cilium-overlay-e2e-step-template.yaml create mode 100644 
.pipelines/templates/setup-environment.yaml create mode 100644 hack/aks/deploy.mk create mode 100644 test/integration/manifests/cilium/v1.17/ebpf/common/allowed-iptables-patterns.yaml create mode 100644 test/integration/manifests/cilium/v1.17/ebpf/common/ccnps.yaml create mode 100644 test/integration/manifests/cilium/v1.17/ebpf/common/cilium-agent-clusterrole.yaml create mode 100644 test/integration/manifests/cilium/v1.17/ebpf/common/ciliumclusterwidenetworkpolicies.yaml create mode 100644 test/integration/manifests/cilium/v1.17/ebpf/overlay/cilium.yaml create mode 100644 test/integration/manifests/cilium/v1.17/ebpf/overlay/static/azure-ip-masq-agent-config-reconciled.yaml create mode 100644 test/integration/manifests/cilium/v1.17/ebpf/overlay/static/cilium-config.yaml create mode 100644 test/integration/manifests/cilium/v1.17/ebpf/podsubnet/cilium.yaml create mode 100644 test/integration/manifests/cilium/v1.17/ebpf/podsubnet/static/azure-dns-imds-ip-masq-agent-config.yaml create mode 100644 test/integration/manifests/cilium/v1.17/ebpf/podsubnet/static/cilium-config.yaml diff --git a/.pipelines/pipeline.yaml b/.pipelines/pipeline.yaml index b0c72e08b3..4bf015e1f0 100644 --- a/.pipelines/pipeline.yaml +++ b/.pipelines/pipeline.yaml @@ -278,8 +278,18 @@ stages: vmSize: Standard_B2ms k8sVersion: "" dependsOn: ["test"] + + # Cilium EBPF Podsubnet E2E tests + - template: singletenancy/cilium-ebpf/cilium-e2e-job-template.yaml + parameters: + name: "cilium_ebpf_podsubnet_e2e" + displayName: Cilium EBPF Podsubnet Ubuntu 24 + clusterType: swift-byocni-nokubeproxy-up + clusterName: "cilbpfpode2e" + vmSize: Standard_B2ms + k8sVersion: "" + dependsOn: ["test"] - # Cilium Nodesubnet E2E tests - template: singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e-job-template.yaml parameters: @@ -313,8 +323,19 @@ stages: vmSize: Standard_B2ms k8sVersion: "" dependsOn: ["test"] + + # Cilium EBPF Overlay E2E tests + - template: 
singletenancy/cilium-overlay-ebpf/cilium-overlay-e2e-job-template.yaml + parameters: + name: "cilium_ebpf_overlay_e2e" + displayName: Cilium EBPF on AKS Overlay Azure Linux + clusterType: overlay-byocni-nokubeproxy-up + clusterName: "cilbpfovere2e" + vmSize: Standard_B2ms + k8sVersion: "" + dependsOn: ["test"] - # Cilium Overlay with hubble E2E tests + # Cilium Overlay with hubble E2E tests - template: singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e-job-template.yaml parameters: name: "cilium_h_overlay_e2e" @@ -447,8 +468,10 @@ stages: - aks_swift_e2e - cilium_e2e - cilium_vnetscale_e2e + - cilium_ebpf_podsubnet_e2e - cilium_nodesubnet_e2e - cilium_overlay_e2e + - cilium_ebpf_overlay_e2e - cilium_h_overlay_e2e - aks_ubuntu_22_linux_e2e - aks_swift_vnetscale_e2e @@ -473,6 +496,10 @@ stages: name: cilium_vnetscale_e2e clusterName: "ciliumvscalee2e" region: $(REGION_AKS_CLUSTER_TEST) + cilium_ebpf_podsubnet_e2e: + name: cilium_ebpf_podsubnet_e2e + clusterName: "cilbpfpode2e" + region: $(REGION_AKS_CLUSTER_TEST) cilium_nodesubnet_e2e: name: cilium_nodesubnet_e2e clusterName: "cilndsubnete2e" @@ -481,6 +508,10 @@ stages: name: cilium_overlay_e2e clusterName: "cilovere2e" region: $(REGION_AKS_CLUSTER_TEST) + cilium_ebpf_overlay_e2e: + name: cilium_ebpf_overlay_e2e + clusterName: "cilbpfovere2e" + region: $(REGION_AKS_CLUSTER_TEST) cilium_h_overlay_e2e: name: cilium_h_overlay_e2e clusterName: "cilwhleovere2e" diff --git a/.pipelines/singletenancy/cilium-ebpf/cilium-e2e-job-template.yaml b/.pipelines/singletenancy/cilium-ebpf/cilium-e2e-job-template.yaml new file mode 100644 index 0000000000..6b9a200d17 --- /dev/null +++ b/.pipelines/singletenancy/cilium-ebpf/cilium-e2e-job-template.yaml @@ -0,0 +1,89 @@ +parameters: + name: "" + displayName: "" + clusterType: "" + clusterName: "" + vmSize: "" + k8sVersion: "" + dependsOn: "" + os: "linux" + +stages: + - stage: ${{ parameters.clusterName }} + displayName: Create Cluster - ${{ parameters.displayName }} + 
dependsOn: + - ${{ parameters.dependsOn }} + - setup + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - template: ../../templates/create-cluster.yaml + parameters: + name: ${{ parameters.name }} + displayName: ${{ parameters.displayName }} + clusterType: ${{ parameters.clusterType }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + k8sVersion: ${{ parameters.k8sVersion }} + dependsOn: ${{ parameters.dependsOn }} + osSKU: "Ubuntu2404" + region: $(REGION_AKS_CLUSTER_TEST) + + - stage: ${{ parameters.name }} + displayName: E2E - ${{ parameters.displayName }} + dependsOn: + - setup + - publish + - ${{ parameters.clusterName }} + variables: + TAG: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.Tag'] ] + CURRENT_VERSION: $[ stagedependencies.containerize.check_tag.outputs['CurrentTagManifests.currentTagManifests'] ] + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path + GOBIN: "$(GOPATH)/bin" # Go binaries path + modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" + condition: and(succeeded(), eq(variables.TAG, variables.CURRENT_VERSION)) + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + jobs: + - job: ${{ parameters.name }} + displayName: Cilium Test Suite - (${{ parameters.name }}) + timeoutInMinutes: 120 + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals Linux + - Role -equals $(CUSTOM_E2E_ROLE) + steps: + - template: cilium-e2e-step-template.yaml + parameters: + name: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + scaleup: 50 + + - template: ../../cni/k8s-e2e/k8s-e2e-job-template.yaml + parameters: + sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cilium 
+ dependsOn: ${{ parameters.name }} + datapath: true + dns: true + portforward: true + service: true + + - job: failedE2ELogs + displayName: "Failure Logs" + dependsOn: + - ${{ parameters.name }} + - cni_${{ parameters.os }} + condition: failed() + steps: + - template: ../../templates/log-template.yaml + parameters: + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cilium diff --git a/.pipelines/singletenancy/cilium-ebpf/cilium-e2e-step-template.yaml b/.pipelines/singletenancy/cilium-ebpf/cilium-e2e-step-template.yaml new file mode 100644 index 0000000000..ff825116f9 --- /dev/null +++ b/.pipelines/singletenancy/cilium-ebpf/cilium-e2e-step-template.yaml @@ -0,0 +1,45 @@ +parameters: + name: "" + clusterName: "" + scaleup: "" + +steps: + - template: ../../templates/setup-environment.yaml + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + ls -lah + pwd + kubectl cluster-info + kubectl get pods -Aowide + + # see makefile + export AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY=acnpublic.azurecr.io + export AZURE_IPTABLES_MONITOR_TAG=$(make azure-iptables-monitor-version) + make -C ./hack/aks deploy-ebpf-podsubnet-cilium + + kubectl get pods -Aowide + name: "installCilium" + displayName: "Install EBPF Podsubnet Cilium" + + - template: ../../templates/cilium-cli.yaml + + - script: | + echo "Start Azilium E2E Tests" + kubectl get po -owide -A + sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=linux VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_AZILIUM=true AZURE_IPAM_VERSION=$(make azure-ipam-version) CNS_VERSION=$(make cns-version) CLEANUP=true + retryCountOnTaskFailure: 3 + name: "aziliumTest" + displayName: "Run Azilium E2E" + + - template: ../../templates/cilium-tests.yaml + parameters: + 
clusterName: ${{ parameters.clusterName }} + scaleup: ${{ parameters.scaleup }} diff --git a/.pipelines/singletenancy/cilium-overlay-ebpf/cilium-overlay-e2e-job-template.yaml b/.pipelines/singletenancy/cilium-overlay-ebpf/cilium-overlay-e2e-job-template.yaml new file mode 100644 index 0000000000..cf15021dc8 --- /dev/null +++ b/.pipelines/singletenancy/cilium-overlay-ebpf/cilium-overlay-e2e-job-template.yaml @@ -0,0 +1,86 @@ +parameters: + name: "" + displayName: "" + clusterType: "" + clusterName: "" + vmSize: "" + k8sVersion: "" + dependsOn: "" + os: "linux" + +stages: + - stage: ${{ parameters.clusterName }} + displayName: Create Cluster - ${{ parameters.displayName }} + dependsOn: + - ${{ parameters.dependsOn }} + - setup + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - template: ../../templates/create-cluster.yaml + parameters: + name: ${{ parameters.name }} + displayName: ${{ parameters.displayName }} + clusterType: ${{ parameters.clusterType }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + k8sVersion: ${{ parameters.k8sVersion }} + dependsOn: ${{ parameters.dependsOn }} + osSKU: "AzureLinux" + region: $(REGION_AKS_CLUSTER_TEST) + + - stage: ${{ parameters.name }} + displayName: E2E - ${{ parameters.displayName }} + dependsOn: + - setup + - publish + - ${{ parameters.clusterName }} + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path + GOBIN: "$(GOPATH)/bin" # Go binaries path + modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + jobs: + - job: ${{ parameters.name }} + displayName: Cilium EBPF Overlay Test Suite - (${{ parameters.name }}) + timeoutInMinutes: 120 + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals 
Linux + - Role -equals $(CUSTOM_E2E_ROLE) + steps: + - template: cilium-overlay-e2e-step-template.yaml + parameters: + name: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + scaleup: 50 + + - template: ../../cni/k8s-e2e/k8s-e2e-job-template.yaml + parameters: + sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cilium + dependsOn: ${{ parameters.name }} + datapath: true + dns: true + portforward: true + service: true + + - job: failedE2ELogs + displayName: "Failure Logs" + dependsOn: + - ${{ parameters.name }} + - cni_${{ parameters.os }} + condition: failed() + steps: + - template: ../../templates/log-template.yaml + parameters: + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: ${{ parameters.os }} + cni: cilium diff --git a/.pipelines/singletenancy/cilium-overlay-ebpf/cilium-overlay-e2e-step-template.yaml b/.pipelines/singletenancy/cilium-overlay-ebpf/cilium-overlay-e2e-step-template.yaml new file mode 100644 index 0000000000..8a223af1aa --- /dev/null +++ b/.pipelines/singletenancy/cilium-overlay-ebpf/cilium-overlay-e2e-step-template.yaml @@ -0,0 +1,48 @@ +parameters: + name: "" + clusterName: "" + scaleup: "" + + +steps: + - template: ../../templates/setup-environment.yaml + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + ls -lah + pwd + kubectl cluster-info + kubectl get pods -Aowide + + # see makefile + export AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY=acnpublic.azurecr.io + export AZURE_IPTABLES_MONITOR_TAG=$(make azure-iptables-monitor-version) + export AZURE_IP_MASQ_MERGER_IMAGE_REGISTRY=acnpublic.azurecr.io + export AZURE_IP_MASQ_MERGER_TAG=$(make azure-ip-masq-merger-version) + make -C ./hack/aks 
deploy-ebpf-overlay-cilium + + kubectl get pods -Aowide + name: "installCilium" + displayName: "Install Cilium EBPF on AKS Overlay" + + - template: ../../templates/cilium-cli.yaml + + - script: | + CNS=$(make cns-version) IPAM=$(make azure-ipam-version) + kubectl get pods -Aowide + sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=linux VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_OVERLAY=true AZURE_IPAM_VERSION=${IPAM} CNS_VERSION=${CNS} CLEANUP=true + retryCountOnTaskFailure: 3 + name: "aziliumTest" + displayName: "Deploy CNS and Run Azilium E2E on AKS Overlay" + + - template: ../../templates/cilium-tests.yaml + parameters: + clusterName: ${{ parameters.clusterName }} + scaleup: ${{ parameters.scaleup }} diff --git a/.pipelines/templates/create-cluster.yaml b/.pipelines/templates/create-cluster.yaml index 60573922a3..3b34c16553 100644 --- a/.pipelines/templates/create-cluster.yaml +++ b/.pipelines/templates/create-cluster.yaml @@ -7,6 +7,7 @@ parameters: vmSizeWin: "" k8sVersion: "" osSkuWin: "Windows2022" # Currently we only support Windows2022 + osSKU: "Ubuntu" dependsOn: "" region: "" os: linux @@ -44,7 +45,7 @@ jobs: AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) \ CLUSTER=${{ parameters.clusterName }} \ VM_SIZE=${{ parameters.vmSize }} VM_SIZE_WIN=${{ parameters.vmSizeWin }} \ - OS_SKU_WIN=${{ parameters.osSkuWin }} OS=${{ parameters.os }} + OS_SKU_WIN=${{ parameters.osSkuWin }} OS=${{ parameters.os }} OS_SKU=${{ parameters.osSKU }} echo "Cluster successfully created" displayName: Cluster - ${{ parameters.clusterType }} diff --git a/.pipelines/templates/setup-environment.yaml b/.pipelines/templates/setup-environment.yaml new file mode 100644 index 0000000000..1bc235e00b --- /dev/null +++ b/.pipelines/templates/setup-environment.yaml @@ -0,0 +1,22 @@ +steps: + - bash: | + echo $UID + sudo rm -rf $(System.DefaultWorkingDirectory)/* + displayName: "Set up OS environment" + + - checkout: self + + - bash: | 
+ go version + go env + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + name: "GoEnv" + displayName: "Set up the Go environment" + + - task: KubectlInstaller@0 + inputs: + kubectlVersion: latest diff --git a/hack/aks/Makefile b/hack/aks/Makefile index d0ce352528..a5011611f9 100644 --- a/hack/aks/Makefile +++ b/hack/aks/Makefile @@ -1,3 +1,5 @@ +include deploy.mk + .DEFAULT_GOAL: help # construct containerized azcli command @@ -50,6 +52,7 @@ COMMON_AKS_FIELDS = $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ --no-ssh-key \ --os-sku $(OS_SKU) \ $(LTS_ARGS) +POD_CIDR = 192.168.0.0/16 ##@ Help @@ -147,6 +150,7 @@ nodesubnet-byocni-nokubeproxy-up: rg-up ipv4 overlay-net-up ## Brings up an Node --network-plugin none \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --kube-proxy-config $(KUBE_PROXY_JSON_PATH) \ + --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/Ubuntu2404Preview \ --yes @$(MAKE) set-kubeconf @@ -155,7 +159,7 @@ overlay-byocni-up: rg-up ipv4 overlay-net-up ## Brings up an Overlay BYO CNI clu --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --network-plugin none \ --network-plugin-mode overlay \ - --pod-cidr 192.168.0.0/16 \ + --pod-cidr $(POD_CIDR) \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --yes @$(MAKE) set-kubeconf @@ -168,9 +172,10 @@ overlay-byocni-nokubeproxy-up: rg-up ipv4 overlay-net-up ## Brings up an Overlay --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --network-plugin none \ --network-plugin-mode overlay \ - --pod-cidr 192.168.0.0/16 \ + --pod-cidr $(POD_CIDR) \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --kube-proxy-config 
$(KUBE_PROXY_JSON_PATH) \ + --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/Ubuntu2404Preview \ --yes @$(MAKE) set-kubeconf @@ -180,17 +185,31 @@ overlay-cilium-up: rg-up ipv4 overlay-net-up ## Brings up an Overlay Cilium clus --network-plugin azure \ --network-dataplane cilium \ --network-plugin-mode overlay \ - --pod-cidr 192.168.0.0/16 \ + --pod-cidr $(POD_CIDR) \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --yes @$(MAKE) set-kubeconf +ebpf-overlay-cilium-up: rg-up ipv4 overlay-net-up ## Brings up an EBPF Overlay Cilium cluster + $(COMMON_AKS_FIELDS) \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ + --network-plugin azure \ + --network-dataplane cilium \ + --network-plugin-mode overlay \ + --pod-cidr $(POD_CIDR) \ + --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ + --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AdvancedNetworkingPerformancePreview \ + --enable-acns \ + --acns-datapath-acceleration-mode BpfVeth \ + --yes + @$(MAKE) set-kubeconf + overlay-up: rg-up ipv4 overlay-net-up ## Brings up an Overlay AzCNI cluster $(COMMON_AKS_FIELDS) \ --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --network-plugin azure \ --network-plugin-mode overlay \ - --pod-cidr 192.168.0.0/16 \ + --pod-cidr $(POD_CIDR) \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --yes @$(MAKE) set-kubeconf @@ -217,6 +236,7 @@ swift-byocni-nokubeproxy-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT BYO CNI --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \ --kube-proxy-config 
$(KUBE_PROXY_JSON_PATH) \ + --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/Ubuntu2404Preview \ --yes @$(MAKE) set-kubeconf @@ -286,6 +306,7 @@ vnetscale-swift-byocni-nokubeproxy-up: rg-up ipv4 vnetscale-swift-net-up ## Brin --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \ --kube-proxy-config $(KUBE_PROXY_JSON_PATH) \ --pod-ip-allocation-mode StaticBlock \ + --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/Ubuntu2404Preview \ --yes @$(MAKE) set-kubeconf @@ -376,7 +397,7 @@ dualstack-byocni-nokubeproxy-up: rg-up ipv4 ipv6 overlay-net-up ## Brings up a D --network-plugin-mode overlay \ --subscription $(SUB) \ --ip-families ipv4,ipv6 \ - --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureOverlayDualStackPreview \ + --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureOverlayDualStackPreview,AKSHTTPCustomFeatures=Microsoft.ContainerService/Ubuntu2404Preview \ --kube-proxy-config $(KUBE_PROXY_JSON_PATH) \ --yes @$(MAKE) set-kubeconf diff --git a/hack/aks/deploy.mk b/hack/aks/deploy.mk new file mode 100644 index 0000000000..3b02fad9ee --- /dev/null +++ b/hack/aks/deploy.mk @@ -0,0 +1,49 @@ +EBPF_CILIUM_DIR ?= 1.17 +# we don't use CILIUM_VERSION_TAG or CILIUM_IMAGE_REGISTRY because we want to use the version supported by ebpf +EBPF_CILIUM_VERSION_TAG ?= v1.17.7-250927 +EBPF_CILIUM_IMAGE_REGISTRY ?= mcr.microsoft.com/containernetworking +IPV6_HP_BPF_VERSION ?= v0.0.1 +AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY ?= mcr.microsoft.com/containernetworking +AZURE_IPTABLES_MONITOR_TAG ?= v0.0.3 +AZURE_IP_MASQ_MERGER_IMAGE_REGISTRY ?= mcr.microsoft.com/containernetworking +AZURE_IP_MASQ_MERGER_TAG ?= v0.0.1-0 +# so we can use in envsubst +export IPV6_HP_BPF_VERSION +export AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY +export AZURE_IPTABLES_MONITOR_TAG +export AZURE_IP_MASQ_MERGER_IMAGE_REGISTRY +export 
AZURE_IP_MASQ_MERGER_TAG + +deploy-common-ebpf-cilium: + @kubectl apply -f ../../test/integration/manifests/cilium/v$(EBPF_CILIUM_DIR)/cilium-agent/files/ + @kubectl apply -f ../../test/integration/manifests/cilium/v$(EBPF_CILIUM_DIR)/cilium-operator/files/ +# set cilium version tag and registry here so they are visible as env vars to envsubst + CILIUM_VERSION_TAG=$(EBPF_CILIUM_VERSION_TAG) CILIUM_IMAGE_REGISTRY=$(EBPF_CILIUM_IMAGE_REGISTRY) \ + envsubst '$${CILIUM_VERSION_TAG},$${CILIUM_IMAGE_REGISTRY},$${IPV6_HP_BPF_VERSION}' < \ + ../../test/integration/manifests/cilium/v$(EBPF_CILIUM_DIR)/cilium-operator/templates/deployment.yaml \ + | kubectl apply -f - + @kubectl apply -f ../../test/integration/manifests/cilium/v$(EBPF_CILIUM_DIR)/ebpf/common/ciliumclusterwidenetworkpolicies.yaml + @kubectl wait --for=condition=Established crd/ciliumclusterwidenetworkpolicies.cilium.io + @kubectl apply -f ../../test/integration/manifests/cilium/v$(EBPF_CILIUM_DIR)/ebpf/common/ + +deploy-ebpf-overlay-cilium: deploy-common-ebpf-cilium + @kubectl apply -f ../../test/integration/manifests/cilium/v$(EBPF_CILIUM_DIR)/ebpf/overlay/static/ + CILIUM_VERSION_TAG=$(EBPF_CILIUM_VERSION_TAG) CILIUM_IMAGE_REGISTRY=$(EBPF_CILIUM_IMAGE_REGISTRY) \ + envsubst '$${CILIUM_VERSION_TAG},$${CILIUM_IMAGE_REGISTRY},$${IPV6_HP_BPF_VERSION},$${AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY},$${AZURE_IPTABLES_MONITOR_TAG},$${AZURE_IP_MASQ_MERGER_IMAGE_REGISTRY},$${AZURE_IP_MASQ_MERGER_TAG}' < \ + ../../test/integration/manifests/cilium/v$(EBPF_CILIUM_DIR)/ebpf/overlay/cilium.yaml \ + | kubectl apply -f - + @$(MAKE) wait-for-cilium + +deploy-ebpf-podsubnet-cilium: deploy-common-ebpf-cilium + @kubectl apply -f ../../test/integration/manifests/cilium/v$(EBPF_CILIUM_DIR)/ebpf/podsubnet/static/ +# ebpf podsubnet does not have ip masq merger + CILIUM_VERSION_TAG=$(EBPF_CILIUM_VERSION_TAG) CILIUM_IMAGE_REGISTRY=$(EBPF_CILIUM_IMAGE_REGISTRY) \ + envsubst 
'$${CILIUM_VERSION_TAG},$${CILIUM_IMAGE_REGISTRY},$${IPV6_HP_BPF_VERSION},$${AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY},$${AZURE_IPTABLES_MONITOR_TAG}' < \ + ../../test/integration/manifests/cilium/v$(EBPF_CILIUM_DIR)/ebpf/podsubnet/cilium.yaml \ + | kubectl apply -f - + @$(MAKE) wait-for-cilium + +wait-for-cilium: + @kubectl rollout status deployment/cilium-operator -n kube-system --timeout=1200s + @kubectl rollout status daemonset/cilium -n kube-system --timeout=1200s + diff --git a/test/integration/manifests/cilium/v1.17/ebpf/common/allowed-iptables-patterns.yaml b/test/integration/manifests/cilium/v1.17/ebpf/common/allowed-iptables-patterns.yaml new file mode 100644 index 0000000000..cd8b1ca589 --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/ebpf/common/allowed-iptables-patterns.yaml @@ -0,0 +1,61 @@ +apiVersion: v1 +data: + filter: | + ^.*--comment.*kubernetes load balancer firewall + ^.*--comment.*kubernetes health check service ports + ^.*--comment.*kubernetes externally visible service portals + ^.*--comment.*kubernetes forwarding rules + ^.*--comment.*kubernetes forwarding conntrack rule + ^.*--comment.*kubernetes service portals + ^.*--comment.*kubernetes externally-visible service portals + + -A INPUT -j KUBE-FIREWALL + -A FORWARD -d 168.63.129.16/32 -p tcp -m tcp --dport 32526 -j DROP + -A FORWARD -d 168.63.129.16/32 -p tcp -m tcp --dport 80 -j DROP + -A OUTPUT -j KUBE-FIREWALL + -A KUBE-FIREWALL ! -s 127.0.0.0/8 -d 127.0.0.0/8 -m comment --comment "block incoming localnet connections" -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP + -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP + -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN + global: | + ^-N .* + ^-P .* + ^.*--comment.*cilium: + ^.*--comment.*cilium-feeder: + ^.*--comment.*AKS managed: added by AgentBaker + mangle: | + -A FORWARD -d 168.63.129.16/32 -p tcp -m tcp --dport 80 -j DROP + -A FORWARD -d 168.63.129.16/32 -p tcp -m tcp --dport 32526 -j DROP + nat: | + ^.*--comment.*metrics-server + ^.*--comment.*kube-dns + ^.*--comment.*gatekeeper-webhook-service + ^.*--comment.*azure-policy-webhook-service + ^.*--comment.*kubernetes:https cluster IP + ^.*--comment.*kubernetes forwarding rules + ^.*--comment.*kubernetes service traffic requiring SNAT + ^.*--comment.*kubernetes postrouting rules + ^.*--set-xmark 0x4000 + ^.*--comment.*kubernetes service portals + ^.*--comment.*kubernetes service nodeports + ^.*--comment.*kubernetes:https + ^.*--comment.*ip-masq-agent + ^.*0x4000/0x4000 + -A POSTROUTING -j SWIFT + -A SWIFT -s + -A POSTROUTING -j SWIFT-POSTROUTING + -A SWIFT-POSTROUTING -s + raw: "" + security: | + -A OUTPUT -d 168.63.129.16/32 -p tcp -m tcp --dport 53 -j ACCEPT + -A OUTPUT -d 168.63.129.16/32 -p tcp -m owner --uid-owner 0 -j ACCEPT + -A OUTPUT -d 168.63.129.16/32 -p tcp -m conntrack --ctstate INVALID,NEW -j DROP +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/actually-managed-by: Eno + app.kubernetes.io/managed-by: Helm + name: allowed-iptables-patterns + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.17/ebpf/common/ccnps.yaml b/test/integration/manifests/cilium/v1.17/ebpf/common/ccnps.yaml new file mode 100644 index 0000000000..cc0b65e95b --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/ebpf/common/ccnps.yaml @@ -0,0 +1,19 @@ +apiVersion: "cilium.io/v2" +kind: CiliumClusterwideNetworkPolicy +metadata: + name: block-azure-destinations +spec: + description: "Block TCP access to Azure destinations from all pods" + endpointSelector: {} # Applies to all pods 
in all namespaces + enableDefaultDeny: + egress: false + ingress: false + egressDeny: + - toCIDR: + - 168.63.129.16/32 # Azure DNS + toPorts: + - ports: + - port: "80" + protocol: TCP + - port: "32526" + protocol: TCP diff --git a/test/integration/manifests/cilium/v1.17/ebpf/common/cilium-agent-clusterrole.yaml b/test/integration/manifests/cilium/v1.17/ebpf/common/cilium-agent-clusterrole.yaml new file mode 100644 index 0000000000..30a5fecb72 --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/ebpf/common/cilium-agent-clusterrole.yaml @@ -0,0 +1,125 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/actually-managed-by: Eno + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: cilium + name: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - watch + - get +- apiGroups: + - cilium.io + resources: + - ciliumbgppeeringpolicies + - ciliumclusterwideenvoyconfigs + - ciliumclusterwidenetworkpolicies + - ciliumegressgatewaypolicies + - ciliumendpoints + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumnetworkpolicies + - ciliumnodes + - ciliumnodeconfigs + - ciliumloadbalancerippools + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools + - ciliumbgpnodeconfigs + - ciliumbgpadvertisements + - ciliumbgppeerconfigs + verbs: + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumidentities + - ciliumendpoints + - 
ciliumnodes + verbs: + - create +- apiGroups: + - cilium.io + resources: + - ciliumidentities + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints + verbs: + - delete + - get +- apiGroups: + - cilium.io + resources: + - ciliumnodes + - ciliumnodes/status + verbs: + - get + - update +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies/status + - ciliumendpoints/status + - ciliumendpoints + - ciliuml2announcementpolicies/status + - ciliumbgpnodeconfigs/status + verbs: + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create +- apiGroups: + - cilium.io + resources: + - ciliumnodes + verbs: + - patch diff --git a/test/integration/manifests/cilium/v1.17/ebpf/common/ciliumclusterwidenetworkpolicies.yaml b/test/integration/manifests/cilium/v1.17/ebpf/common/ciliumclusterwidenetworkpolicies.yaml new file mode 100644 index 0000000000..1027fc52ed --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/ebpf/common/ciliumclusterwidenetworkpolicies.yaml @@ -0,0 +1,7054 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + eno.azure.io/disable-updates: "true" + name: ciliumclusterwidenetworkpolicies.cilium.io +spec: + group: cilium.io + names: + categories: + - cilium + - ciliumpolicy + kind: CiliumClusterwideNetworkPolicy + listKind: CiliumClusterwideNetworkPolicyList + plural: ciliumclusterwidenetworkpolicies + shortNames: + - ccnp + singular: ciliumclusterwidenetworkpolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Valid')].status + name: Valid + type: string + name: v2 + schema: + openAPIV3Schema: + description: |- + CiliumClusterwideNetworkPolicy is a Kubernetes third-party resource with an + modified version of CiliumNetworkPolicy which is cluster scoped rather than + namespace scoped. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + anyOf: + - properties: + ingress: {} + required: + - ingress + - properties: + ingressDeny: {} + required: + - ingressDeny + - properties: + egress: {} + required: + - egress + - properties: + egressDeny: {} + required: + - egressDeny + description: Spec is the desired Cilium specific rule specification. + oneOf: + - properties: + endpointSelector: {} + required: + - endpointSelector + - properties: + nodeSelector: {} + required: + - nodeSelector + properties: + description: + description: |- + Description is a free form string, it can be used by the creator of + the rule to store human readable explanation of the purpose of this + rule. Rules cannot be identified by comment. + type: string + egress: + description: |- + Egress is a list of EgressRule which are enforced at egress. + If omitted or empty, this rule does not apply at egress. + items: + description: |- + EgressRule contains all rule types which can be applied at egress, i.e. + network traffic that originates inside the endpoint and exits the endpoint + selected by the endpointSelector. + + - All members of this structure are optional. If omitted or empty, the + member will have no effect on the rule. 
+ + - If multiple members of the structure are specified, then all members + must match in order for the rule to take effect. The exception to this + rule is the ToRequires member; the effects of any Requires field in any + rule will apply to all other rules as well. + + - ToEndpoints, ToCIDR, ToCIDRSet, ToEntities, ToServices and ToGroups are + mutually exclusive. Only one of these members may be present within an + individual rule. + properties: + authentication: + description: Authentication is the required authentication type + for the allowed traffic, if any. + properties: + mode: + description: Mode is the required authentication mode for + the allowed traffic, if any. + enum: + - disabled + - required + - test-always-fail + type: string + required: + - mode + type: object + icmps: + description: |- + ICMPs is a list of ICMP rule identified by type number + which the endpoint subject to the rule is allowed to connect to. + + Example: + Any endpoint with the label "app=httpd" is allowed to initiate + type 8 ICMP connections. + items: + description: ICMPRule is a list of ICMP fields. + properties: + fields: + description: Fields is a list of ICMP fields. + items: + description: ICMPField is a ICMP field. + properties: + family: + default: IPv4 + description: |- + Family is a IP address version. + Currently, we support `IPv4` and `IPv6`. + `IPv4` is set as default. 
+ enum: + - IPv4 + - IPv6 + type: string + type: + anyOf: + - type: integer + - type: string + description: "Type is a ICMP-type.\nIt should be + an 8bit code (0-255), or it's CamelCase name (for + example, \"EchoReply\").\nAllowed ICMP types are:\n + \ Ipv4: EchoReply | DestinationUnreachable | + Redirect | Echo | EchoRequest |\n\t\t RouterAdvertisement + | RouterSelection | TimeExceeded | ParameterProblem + |\n\t\t\t Timestamp | TimestampReply | Photuris + | ExtendedEcho Request | ExtendedEcho Reply\n + \ Ipv6: DestinationUnreachable | PacketTooBig + | TimeExceeded | ParameterProblem |\n\t\t\t EchoRequest + | EchoReply | MulticastListenerQuery| MulticastListenerReport + |\n\t\t\t MulticastListenerDone | RouterSolicitation + | RouterAdvertisement | NeighborSolicitation |\n\t\t\t + NeighborAdvertisement | RedirectMessage | RouterRenumbering + | ICMPNodeInformationQuery |\n\t\t\t ICMPNodeInformationResponse + | InverseNeighborDiscoverySolicitation | InverseNeighborDiscoveryAdvertisement + |\n\t\t\t HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply + | MobilePrefixSolicitation |\n\t\t\t MobilePrefixAdvertisement + | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix + |\n\t\t\t ExtendedEchoRequest | ExtendedEchoReply" + pattern: ^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho + 
Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$ + x-kubernetes-int-or-string: true + required: + - type + type: object + maxItems: 40 + type: array + type: object + type: array + toCIDR: + description: |- + ToCIDR is a list of IP blocks which the endpoint subject to the rule + is allowed to initiate connections. Only connections destined for + outside of the cluster and not targeting the host will be subject + to CIDR rules. This will match on the destination IP address of + outgoing connections. Adding a prefix into ToCIDR or into ToCIDRSet + with no ExcludeCIDRs is equivalent. Overlaps are allowed between + ToCIDR and ToCIDRSet. + + Example: + Any endpoint with the label "app=database-proxy" is allowed to + initiate connections to 10.2.3.0/24 + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + toCIDRSet: + description: |- + ToCIDRSet is a list of IP blocks which the endpoint subject to the rule + is allowed to initiate connections to in addition to connections + which are allowed via ToEndpoints, along with a list of subnets contained + within their corresponding IP block to which traffic should not be + allowed. This will match on the destination IP address of outgoing + connections. Adding a prefix into ToCIDR or into ToCIDRSet with no + ExcludeCIDRs is equivalent. Overlaps are allowed between ToCIDR and + ToCIDRSet. 
+ + Example: + Any endpoint with the label "app=database-proxy" is allowed to + initiate connections to 10.2.3.0/24 except from IPs in subnet 10.2.3.0/28. + items: + description: |- + CIDRRule is a rule that specifies a CIDR prefix to/from which outside + communication is allowed, along with an optional list of subnets within that + CIDR prefix to/from which outside communication is not allowed. + oneOf: + - properties: + cidr: {} + required: + - cidr + - properties: + cidrGroupRef: {} + required: + - cidrGroupRef + - properties: + cidrGroupSelector: {} + required: + - cidrGroupSelector + properties: + cidr: + description: CIDR is a CIDR prefix / IP Block. + format: cidr + type: string + cidrGroupRef: + description: |- + CIDRGroupRef is a reference to a CiliumCIDRGroup object. + A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to + the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + connections from. + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + cidrGroupSelector: + description: |- + CIDRGroupSelector selects CiliumCIDRGroups by their labels, + rather than by name. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + except: + description: |- + ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule + is not allowed to initiate connections to. These CIDR prefixes should be + contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not + supported yet. + These exceptions are only applied to the Cidr in this CIDRRule, and do not + apply to any other CIDR prefixes in any other CIDRRules. + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + type: object + type: array + toEndpoints: + description: |- + ToEndpoints is a list of endpoints identified by an EndpointSelector to + which the endpoints subject to the rule are allowed to communicate. + + Example: + Any endpoint with the label "role=frontend" can communicate with any + endpoint carrying the label "role=backend". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toEntities: + description: |- + ToEntities is a list of special entities to which the endpoint subject + to the rule is allowed to initiate connections. Supported entities are + `world`, `cluster`,`host`,`remote-node`,`kube-apiserver`, `init`, + `health`,`unmanaged` and `all`. + items: + description: |- + Entity specifies the class of receiver/sender endpoints that do not have + individual identities. Entities are used to describe "outside of cluster", + "host", etc. 
+ enum: + - all + - world + - cluster + - host + - init + - ingress + - unmanaged + - remote-node + - health + - none + - kube-apiserver + type: string + type: array + toFQDNs: + description: |- + ToFQDN allows whitelisting DNS names in place of IPs. The IPs that result + from DNS resolution of `ToFQDN.MatchName`s are added to the same + EgressRule object as ToCIDRSet entries, and behave accordingly. Any L4 and + L7 rules within this EgressRule will also apply to these IPs. + The DNS -> IP mapping is re-resolved periodically from within the + cilium-agent, and the IPs in the DNS response are effected in the policy + for selected pods as-is (i.e. the list of IPs is not modified in any way). + Note: An explicit rule to allow for DNS traffic is needed for the pods, as + ToFQDN counts as an egress rule and will enforce egress policy when + PolicyEnforcment=default. + Note: If the resolved IPs are IPs within the kubernetes cluster, the + ToFQDN rule will not apply to that IP. + Note: ToFQDN cannot occur in the same policy as other To* rules. + items: + oneOf: + - properties: + matchName: {} + required: + - matchName + - properties: + matchPattern: {} + required: + - matchPattern + properties: + matchName: + description: |- + MatchName matches literal DNS names. A trailing "." is automatically added + when missing. + maxLength: 255 + pattern: ^([-a-zA-Z0-9_]+[.]?)+$ + type: string + matchPattern: + description: |- + MatchPattern allows using wildcards to match DNS names. All wildcards are + case insensitive. The wildcards are: + - "*" matches 0 or more DNS valid characters, and may occur anywhere in + the pattern. As a special case a "*" as the leftmost character, without a + following "." matches all subdomains as well as the name to the right. + A trailing "." is automatically added when missing. 
+ + Examples: + `*.cilium.io` matches subomains of cilium at that level + www.cilium.io and blog.cilium.io match, cilium.io and google.com do not + `*cilium.io` matches cilium.io and all subdomains ends with "cilium.io" + except those containing "." separator, subcilium.io and sub-cilium.io match, + www.cilium.io and blog.cilium.io does not + sub*.cilium.io matches subdomains of cilium where the subdomain component + begins with "sub" + sub.cilium.io and subdomain.cilium.io match, www.cilium.io, + blog.cilium.io, cilium.io and google.com do not + maxLength: 255 + pattern: ^([-a-zA-Z0-9_*]+[.]?)+$ + type: string + type: object + type: array + toGroups: + description: |- + ToGroups is a directive that allows the integration with multiple outside + providers. Currently, only AWS is supported, and the rule can select by + multiple sub directives: + + Example: + toGroups: + - aws: + securityGroupsIds: + - 'sg-XXXXXXXXXXXXX' + items: + description: |- + Groups structure to store all kinds of new integrations that needs a new + derivative policy. + properties: + aws: + description: AWSGroup is an structure that can be used + to whitelisting information from AWS integration + properties: + labels: + additionalProperties: + type: string + type: object + region: + type: string + securityGroupsIds: + items: + type: string + type: array + securityGroupsNames: + items: + type: string + type: array + type: object + type: object + type: array + toNodes: + description: |- + ToNodes is a list of nodes identified by an + EndpointSelector to which endpoints subject to the rule is allowed to communicate. + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toPorts: + description: |- + ToPorts is a list of destination ports identified by port number and + protocol which the endpoint subject to the rule is allowed to + connect to. + + Example: + Any endpoint with the label "role=frontend" is allowed to initiate + connections to destination port 8080/tcp + items: + description: |- + PortRule is a list of ports/protocol combinations with optional Layer 7 + rules which must be met. + properties: + listener: + description: |- + listener specifies the name of a custom Envoy listener to which this traffic should be + redirected to. 
+ properties: + envoyConfig: + description: |- + EnvoyConfig is a reference to the CEC or CCEC resource in which + the listener is defined. + properties: + kind: + description: |- + Kind is the resource type being referred to. Defaults to CiliumEnvoyConfig or + CiliumClusterwideEnvoyConfig for CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy, + respectively. The only case this is currently explicitly needed is when referring to a + CiliumClusterwideEnvoyConfig from CiliumNetworkPolicy, as using a namespaced listener + from a cluster scoped policy is not allowed. + enum: + - CiliumEnvoyConfig + - CiliumClusterwideEnvoyConfig + type: string + name: + description: |- + Name is the resource name of the CiliumEnvoyConfig or CiliumClusterwideEnvoyConfig where + the listener is defined in. + minLength: 1 + type: string + required: + - name + type: object + name: + description: Name is the name of the listener. + minLength: 1 + type: string + priority: + description: |- + Priority for this Listener that is used when multiple rules would apply different + listeners to a policy map entry. Behavior of this is implementation dependent. + maximum: 100 + minimum: 1 + type: integer + required: + - envoyConfig + - name + type: object + originatingTLS: + description: |- + OriginatingTLS is the TLS context for the connections originated by + the L7 proxy. For egress policy this specifies the client-side TLS + parameters for the upstream connection originating from the L7 proxy + to the remote destination. For ingress policy this specifies the + client-side TLS parameters for the connection from the L7 proxy to + the local endpoint. + properties: + certificate: + description: |- + Certificate is the file name or k8s secret item name for the certificate + chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the + item must exist. 
+ type: string + privateKey: + description: |- + PrivateKey is the file name or k8s secret item name for the private key + matching the certificate chain. If omitted, 'tls.key' is assumed, if it + exists. If given, the item must exist. + type: string + secret: + description: |- + Secret is the secret that contains the certificates and private key for + the TLS context. + By default, Cilium will search in this secret for the following items: + - 'ca.crt' - Which represents the trusted CA to verify remote source. + - 'tls.crt' - Which represents the public key certificate. + - 'tls.key' - Which represents the private key matching the public key + certificate. + properties: + name: + description: Name is the name of the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + trustedCA: + description: |- + TrustedCA is the file name or k8s secret item name for the trusted CA. + If omitted, 'ca.crt' is assumed, if it exists. If given, the item must + exist. + type: string + required: + - secret + type: object + ports: + description: Ports is a list of L4 port/protocol + items: + description: PortProtocol specifies an L4 port with + an optional transport protocol + properties: + endPort: + description: EndPort can only be an L4 port number. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + description: |- + Port can be an L4 port number, or a name in the form of "http" + or "http-8080". + pattern: ^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$ + type: string + protocol: + description: |- + Protocol is the L4 protocol. If omitted or empty, any protocol + matches. Accepted values: "TCP", "UDP", "SCTP", "ANY" + + Matching on ICMP is not supported. 
+ + Named port specified for a container may narrow this down, but may not + contradict this. + enum: + - TCP + - UDP + - SCTP + - ANY + type: string + required: + - port + type: object + maxItems: 40 + type: array + rules: + description: |- + Rules is a list of additional port level rules which must be met in + order for the PortRule to allow the traffic. If omitted or empty, + no layer 7 rules are enforced. + oneOf: + - properties: + http: {} + required: + - http + - properties: + kafka: {} + required: + - kafka + - properties: + dns: {} + required: + - dns + - properties: + l7proto: {} + required: + - l7proto + properties: + dns: + description: DNS-specific rules. + items: + description: PortRuleDNS is a list of allowed DNS + lookups. + oneOf: + - properties: + matchName: {} + required: + - matchName + - properties: + matchPattern: {} + required: + - matchPattern + properties: + matchName: + description: |- + MatchName matches literal DNS names. A trailing "." is automatically added + when missing. + maxLength: 255 + pattern: ^([-a-zA-Z0-9_]+[.]?)+$ + type: string + matchPattern: + description: |- + MatchPattern allows using wildcards to match DNS names. All wildcards are + case insensitive. The wildcards are: + - "*" matches 0 or more DNS valid characters, and may occur anywhere in + the pattern. As a special case a "*" as the leftmost character, without a + following "." matches all subdomains as well as the name to the right. + A trailing "." is automatically added when missing. + + Examples: + `*.cilium.io` matches subomains of cilium at that level + www.cilium.io and blog.cilium.io match, cilium.io and google.com do not + `*cilium.io` matches cilium.io and all subdomains ends with "cilium.io" + except those containing "." 
separator, subcilium.io and sub-cilium.io match, + www.cilium.io and blog.cilium.io does not + sub*.cilium.io matches subdomains of cilium where the subdomain component + begins with "sub" + sub.cilium.io and subdomain.cilium.io match, www.cilium.io, + blog.cilium.io, cilium.io and google.com do not + maxLength: 255 + pattern: ^([-a-zA-Z0-9_*]+[.]?)+$ + type: string + type: object + type: array + http: + description: HTTP specific rules. + items: + description: |- + PortRuleHTTP is a list of HTTP protocol constraints. All fields are + optional, if all fields are empty or missing, the rule does not have any + effect. + + All fields of this type are extended POSIX regex as defined by IEEE Std + 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) + matched against the path of an incoming request. Currently it can contain + characters disallowed from the conventional "path" part of a URL as defined + by RFC 3986. + properties: + headerMatches: + description: |- + HeaderMatches is a list of HTTP headers which must be + present and match against the given values. Mismatch field can be used + to specify what to do when there is no match. + items: + description: |- + HeaderMatch extends the HeaderValue for matching requirement of a + named header field against an immediate string, a secret value, or + a regex. If none of the optional fields is present, then the + header value is not matched, only presence of the header is enough. + properties: + mismatch: + description: |- + Mismatch identifies what to do in case there is no match. The default is + to drop the request. Otherwise the overall rule is still considered as + matching, but the mismatches are logged in the access log. + enum: + - LOG + - ADD + - DELETE + - REPLACE + type: string + name: + description: Name identifies the header. + minLength: 1 + type: string + secret: + description: |- + Secret refers to a secret that contains the value to be matched against. 
+ The secret must only contain one entry. If the referred secret does not + exist, and there is no "Value" specified, the match will fail. + properties: + name: + description: Name is the name of the + secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + value: + description: |- + Value matches the exact value of the header. Can be specified either + alone or together with "Secret"; will be used as the header value if the + secret can not be found in the latter case. + type: string + required: + - name + type: object + type: array + headers: + description: |- + Headers is a list of HTTP headers which must be present in the + request. If omitted or empty, requests are allowed regardless of + headers present. + items: + type: string + type: array + host: + description: |- + Host is an extended POSIX regex matched against the host header of a + request. Examples: + + - foo.bar.com will match the host fooXbar.com or foo-bar.com + - foo\.bar\.com will only match the host foo.bar.com + + If omitted or empty, the value of the host header is ignored. + format: idn-hostname + type: string + method: + description: |- + Method is an extended POSIX regex matched against the method of a + request, e.g. "GET", "POST", "PUT", "PATCH", "DELETE", ... + + If omitted or empty, all methods are allowed. + type: string + path: + description: |- + Path is an extended POSIX regex matched against the path of a + request. Currently it can contain characters disallowed from the + conventional "path" part of a URL as defined by RFC 3986. + + If omitted or empty, all paths are all allowed. + type: string + type: object + type: array + kafka: + description: Kafka-specific rules. + items: + description: |- + PortRule is a list of Kafka protocol constraints. 
All fields are + optional, if all fields are empty or missing, the rule will match all + Kafka messages. + properties: + apiKey: + description: |- + APIKey is a case-insensitive string matched against the key of a + request, e.g. "produce", "fetch", "createtopic", "deletetopic", et al + Reference: https://kafka.apache.org/protocol#protocol_api_keys + + If omitted or empty, and if Role is not specified, then all keys are allowed. + type: string + apiVersion: + description: |- + APIVersion is the version matched against the api version of the + Kafka message. If set, it has to be a string representing a positive + integer. + + If omitted or empty, all versions are allowed. + type: string + clientID: + description: |- + ClientID is the client identifier as provided in the request. + + From Kafka protocol documentation: + This is a user supplied identifier for the client application. The + user can use any identifier they like and it will be used when + logging errors, monitoring aggregates, etc. For example, one might + want to monitor not just the requests per second overall, but the + number coming from each client application (each of which could + reside on multiple servers). This id acts as a logical grouping + across all requests from a particular client. + + If omitted or empty, all client identifiers are allowed. + type: string + role: + description: |- + Role is a case-insensitive string and describes a group of API keys + necessary to perform certain higher-level Kafka operations such as "produce" + or "consume". A Role automatically expands into all APIKeys required + to perform the specified higher-level operation. + + The following values are supported: + - "produce": Allow producing to the topics specified in the rule + - "consume": Allow consuming from the topics specified in the rule + + This field is incompatible with the APIKey field, i.e APIKey and Role + cannot both be specified in the same rule. 
+ + If omitted or empty, and if APIKey is not specified, then all keys are + allowed. + enum: + - produce + - consume + type: string + topic: + description: |- + Topic is the topic name contained in the message. If a Kafka request + contains multiple topics, then all topics must be allowed or the + message will be rejected. + + This constraint is ignored if the matched request message type + doesn't contain any topic. Maximum size of Topic can be 249 + characters as per recent Kafka spec and allowed characters are + a-z, A-Z, 0-9, -, . and _. + + Older Kafka versions had longer topic lengths of 255, but in Kafka 0.10 + version the length was changed from 255 to 249. For compatibility + reasons we are using 255. + + If omitted or empty, all topics are allowed. + maxLength: 255 + type: string + type: object + type: array + l7: + description: Key-value pair rules. + items: + additionalProperties: + type: string + description: |- + PortRuleL7 is a list of key-value pairs interpreted by a L7 protocol as + protocol constraints. All fields are optional, if all fields are empty or + missing, the rule does not have any effect. + type: object + type: array + l7proto: + description: Name of the L7 protocol for which the + Key-value pair rules apply. + type: string + type: object + serverNames: + description: |- + ServerNames is a list of allowed TLS SNI values. If not empty, then + TLS must be present and one of the provided SNIs must be indicated in the + TLS handshake. + items: + type: string + type: array + terminatingTLS: + description: |- + TerminatingTLS is the TLS context for the connection terminated by + the L7 proxy. For egress policy this specifies the server-side TLS + parameters to be applied on the connections originated from the local + endpoint and terminated by the L7 proxy. For ingress policy this specifies + the server-side TLS parameters to be applied on the connections + originated from a remote source and terminated by the L7 proxy. 
+ properties: + certificate: + description: |- + Certificate is the file name or k8s secret item name for the certificate + chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the + item must exist. + type: string + privateKey: + description: |- + PrivateKey is the file name or k8s secret item name for the private key + matching the certificate chain. If omitted, 'tls.key' is assumed, if it + exists. If given, the item must exist. + type: string + secret: + description: |- + Secret is the secret that contains the certificates and private key for + the TLS context. + By default, Cilium will search in this secret for the following items: + - 'ca.crt' - Which represents the trusted CA to verify remote source. + - 'tls.crt' - Which represents the public key certificate. + - 'tls.key' - Which represents the private key matching the public key + certificate. + properties: + name: + description: Name is the name of the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + trustedCA: + description: |- + TrustedCA is the file name or k8s secret item name for the trusted CA. + If omitted, 'ca.crt' is assumed, if it exists. If given, the item must + exist. + type: string + required: + - secret + type: object + type: object + type: array + toRequires: + description: |- + ToRequires is a list of additional constraints which must be met + in order for the selected endpoints to be able to connect to other + endpoints. These additional constraints do no by itself grant access + privileges and must always be accompanied with at least one matching + ToEndpoints. + + Example: + Any Endpoint with the label "team=A" requires any endpoint to which it + communicates to also carry the label "team=A". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toServices: + description: |- + ToServices is a list of services to which the endpoint subject + to the rule is allowed to initiate connections. + Currently Cilium only supports toServices for K8s services. + items: + description: |- + Service selects policy targets that are bundled as part of a + logical load-balanced service. 
+ + Currently only Kubernetes-based Services are supported. + properties: + k8sService: + description: K8sService selects service by name and namespace + pair + properties: + namespace: + type: string + serviceName: + type: string + type: object + k8sServiceSelector: + description: K8sServiceSelector selects services by k8s + labels and namespace + properties: + namespace: + type: string + selector: + description: ServiceSelector is a label selector for + k8s services + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the + value from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - selector + type: object + type: object + type: array + type: object + type: array + egressDeny: + description: |- + EgressDeny is a list of EgressDenyRule which are enforced at egress. + Any rule inserted here will be denied regardless of the allowed egress + rules in the 'egress' field. + If omitted or empty, this rule does not apply at egress. + items: + description: |- + EgressDenyRule contains all rule types which can be applied at egress, i.e. + network traffic that originates inside the endpoint and exits the endpoint + selected by the endpointSelector. + + - All members of this structure are optional. If omitted or empty, the + member will have no effect on the rule. + + - If multiple members of the structure are specified, then all members + must match in order for the rule to take effect. The exception to this + rule is the ToRequires member; the effects of any Requires field in any + rule will apply to all other rules as well. + + - ToEndpoints, ToCIDR, ToCIDRSet, ToEntities, ToServices and ToGroups are + mutually exclusive. Only one of these members may be present within an + individual rule. + properties: + icmps: + description: |- + ICMPs is a list of ICMP rule identified by type number + which the endpoint subject to the rule is not allowed to connect to. + + Example: + Any endpoint with the label "app=httpd" is not allowed to initiate + type 8 ICMP connections. + items: + description: ICMPRule is a list of ICMP fields. + properties: + fields: + description: Fields is a list of ICMP fields. + items: + description: ICMPField is a ICMP field. + properties: + family: + default: IPv4 + description: |- + Family is a IP address version. + Currently, we support `IPv4` and `IPv6`. + `IPv4` is set as default. 
+ enum: + - IPv4 + - IPv6 + type: string + type: + anyOf: + - type: integer + - type: string + description: "Type is a ICMP-type.\nIt should be + an 8bit code (0-255), or it's CamelCase name (for + example, \"EchoReply\").\nAllowed ICMP types are:\n + \ Ipv4: EchoReply | DestinationUnreachable | + Redirect | Echo | EchoRequest |\n\t\t RouterAdvertisement + | RouterSelection | TimeExceeded | ParameterProblem + |\n\t\t\t Timestamp | TimestampReply | Photuris + | ExtendedEcho Request | ExtendedEcho Reply\n + \ Ipv6: DestinationUnreachable | PacketTooBig + | TimeExceeded | ParameterProblem |\n\t\t\t EchoRequest + | EchoReply | MulticastListenerQuery| MulticastListenerReport + |\n\t\t\t MulticastListenerDone | RouterSolicitation + | RouterAdvertisement | NeighborSolicitation |\n\t\t\t + NeighborAdvertisement | RedirectMessage | RouterRenumbering + | ICMPNodeInformationQuery |\n\t\t\t ICMPNodeInformationResponse + | InverseNeighborDiscoverySolicitation | InverseNeighborDiscoveryAdvertisement + |\n\t\t\t HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply + | MobilePrefixSolicitation |\n\t\t\t MobilePrefixAdvertisement + | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix + |\n\t\t\t ExtendedEchoRequest | ExtendedEchoReply" + pattern: ^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho + 
Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$ + x-kubernetes-int-or-string: true + required: + - type + type: object + maxItems: 40 + type: array + type: object + type: array + toCIDR: + description: |- + ToCIDR is a list of IP blocks which the endpoint subject to the rule + is allowed to initiate connections. Only connections destined for + outside of the cluster and not targeting the host will be subject + to CIDR rules. This will match on the destination IP address of + outgoing connections. Adding a prefix into ToCIDR or into ToCIDRSet + with no ExcludeCIDRs is equivalent. Overlaps are allowed between + ToCIDR and ToCIDRSet. + + Example: + Any endpoint with the label "app=database-proxy" is allowed to + initiate connections to 10.2.3.0/24 + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + toCIDRSet: + description: |- + ToCIDRSet is a list of IP blocks which the endpoint subject to the rule + is allowed to initiate connections to in addition to connections + which are allowed via ToEndpoints, along with a list of subnets contained + within their corresponding IP block to which traffic should not be + allowed. This will match on the destination IP address of outgoing + connections. Adding a prefix into ToCIDR or into ToCIDRSet with no + ExcludeCIDRs is equivalent. Overlaps are allowed between ToCIDR and + ToCIDRSet. 
+ + Example: + Any endpoint with the label "app=database-proxy" is allowed to + initiate connections to 10.2.3.0/24 except from IPs in subnet 10.2.3.0/28. + items: + description: |- + CIDRRule is a rule that specifies a CIDR prefix to/from which outside + communication is allowed, along with an optional list of subnets within that + CIDR prefix to/from which outside communication is not allowed. + oneOf: + - properties: + cidr: {} + required: + - cidr + - properties: + cidrGroupRef: {} + required: + - cidrGroupRef + - properties: + cidrGroupSelector: {} + required: + - cidrGroupSelector + properties: + cidr: + description: CIDR is a CIDR prefix / IP Block. + format: cidr + type: string + cidrGroupRef: + description: |- + CIDRGroupRef is a reference to a CiliumCIDRGroup object. + A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to + the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + connections from. + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + cidrGroupSelector: + description: |- + CIDRGroupSelector selects CiliumCIDRGroups by their labels, + rather than by name. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + except: + description: |- + ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule + is not allowed to initiate connections to. These CIDR prefixes should be + contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not + supported yet. + These exceptions are only applied to the Cidr in this CIDRRule, and do not + apply to any other CIDR prefixes in any other CIDRRules. + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + type: object + type: array + toEndpoints: + description: |- + ToEndpoints is a list of endpoints identified by an EndpointSelector to + which the endpoints subject to the rule are allowed to communicate. + + Example: + Any endpoint with the label "role=frontend" can communicate with any + endpoint carrying the label "role=backend". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toEntities: + description: |- + ToEntities is a list of special entities to which the endpoint subject + to the rule is allowed to initiate connections. Supported entities are + `world`, `cluster`,`host`,`remote-node`,`kube-apiserver`, `init`, + `health`,`unmanaged` and `all`. + items: + description: |- + Entity specifies the class of receiver/sender endpoints that do not have + individual identities. Entities are used to describe "outside of cluster", + "host", etc. 
+ enum: + - all + - world + - cluster + - host + - init + - ingress + - unmanaged + - remote-node + - health + - none + - kube-apiserver + type: string + type: array + toGroups: + description: |- + ToGroups is a directive that allows the integration with multiple outside + providers. Currently, only AWS is supported, and the rule can select by + multiple sub directives: + + Example: + toGroups: + - aws: + securityGroupsIds: + - 'sg-XXXXXXXXXXXXX' + items: + description: |- + Groups structure to store all kinds of new integrations that needs a new + derivative policy. + properties: + aws: + description: AWSGroup is an structure that can be used + to whitelisting information from AWS integration + properties: + labels: + additionalProperties: + type: string + type: object + region: + type: string + securityGroupsIds: + items: + type: string + type: array + securityGroupsNames: + items: + type: string + type: array + type: object + type: object + type: array + toNodes: + description: |- + ToNodes is a list of nodes identified by an + EndpointSelector to which endpoints subject to the rule is allowed to communicate. + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toPorts: + description: |- + ToPorts is a list of destination ports identified by port number and + protocol which the endpoint subject to the rule is not allowed to connect + to. + + Example: + Any endpoint with the label "role=frontend" is not allowed to initiate + connections to destination port 8080/tcp + items: + description: |- + PortDenyRule is a list of ports/protocol that should be used for deny + policies. This structure lacks the L7Rules since it's not supported in deny + policies. + properties: + ports: + description: Ports is a list of L4 port/protocol + items: + description: PortProtocol specifies an L4 port with + an optional transport protocol + properties: + endPort: + description: EndPort can only be an L4 port number. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + description: |- + Port can be an L4 port number, or a name in the form of "http" + or "http-8080". 
+ pattern: ^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$ + type: string + protocol: + description: |- + Protocol is the L4 protocol. If omitted or empty, any protocol + matches. Accepted values: "TCP", "UDP", "SCTP", "ANY" + + Matching on ICMP is not supported. + + Named port specified for a container may narrow this down, but may not + contradict this. + enum: + - TCP + - UDP + - SCTP + - ANY + type: string + required: + - port + type: object + type: array + type: object + type: array + toRequires: + description: |- + ToRequires is a list of additional constraints which must be met + in order for the selected endpoints to be able to connect to other + endpoints. These additional constraints do no by itself grant access + privileges and must always be accompanied with at least one matching + ToEndpoints. + + Example: + Any Endpoint with the label "team=A" requires any endpoint to which it + communicates to also carry the label "team=A". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toServices: + description: |- + ToServices is a list of services to which the endpoint subject + to the rule is allowed to initiate connections. + Currently Cilium only supports toServices for K8s services. + items: + description: |- + Service selects policy targets that are bundled as part of a + logical load-balanced service. + + Currently only Kubernetes-based Services are supported. + properties: + k8sService: + description: K8sService selects service by name and namespace + pair + properties: + namespace: + type: string + serviceName: + type: string + type: object + k8sServiceSelector: + description: K8sServiceSelector selects services by k8s + labels and namespace + properties: + namespace: + type: string + selector: + description: ServiceSelector is a label selector for + k8s services + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the + value from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - selector + type: object + type: object + type: array + type: object + type: array + enableDefaultDeny: + description: |- + EnableDefaultDeny determines whether this policy configures the + subject endpoint(s) to have a default deny mode. If enabled, + this causes all traffic not explicitly allowed by a network policy + to be dropped. + + If not specified, the default is true for each traffic direction + that has rules, and false otherwise. For example, if a policy + only has Ingress or IngressDeny rules, then the default for + ingress is true and egress is false. + + If multiple policies apply to an endpoint, that endpoint's default deny + will be enabled if any policy requests it. 
+ + This is useful for creating broad-based network policies that will not + cause endpoints to enter default-deny mode. + properties: + egress: + description: |- + Whether or not the endpoint should have a default-deny rule applied + to egress traffic. + type: boolean + ingress: + description: |- + Whether or not the endpoint should have a default-deny rule applied + to ingress traffic. + type: boolean + type: object + endpointSelector: + description: |- + EndpointSelector selects all endpoints which should be subject to + this rule. EndpointSelector and NodeSelector cannot be both empty and + are mutually exclusive. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from the + MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + ingress: + description: |- + Ingress is a list of IngressRule which are enforced at ingress. + If omitted or empty, this rule does not apply at ingress. + items: + description: |- + IngressRule contains all rule types which can be applied at ingress, + i.e. network traffic that originates outside of the endpoint and + is entering the endpoint selected by the endpointSelector. + + - All members of this structure are optional. If omitted or empty, the + member will have no effect on the rule. + + - If multiple members are set, all of them need to match in order for + the rule to take effect. The exception to this rule is FromRequires field; + the effects of any Requires field in any rule will apply to all other + rules as well. + + - FromEndpoints, FromCIDR, FromCIDRSet and FromEntities are mutually + exclusive. Only one of these members may be present within an individual + rule. + properties: + authentication: + description: Authentication is the required authentication type + for the allowed traffic, if any. + properties: + mode: + description: Mode is the required authentication mode for + the allowed traffic, if any. + enum: + - disabled + - required + - test-always-fail + type: string + required: + - mode + type: object + fromCIDR: + description: |- + FromCIDR is a list of IP blocks which the endpoint subject to the + rule is allowed to receive connections from. Only connections which + do *not* originate from the cluster or from the local host are subject + to CIDR rules. In order to allow in-cluster connectivity, use the + FromEndpoints field. This will match on the source IP address of + incoming connections. 
Adding a prefix into FromCIDR or into + FromCIDRSet with no ExcludeCIDRs is equivalent. Overlaps are + allowed between FromCIDR and FromCIDRSet. + + Example: + Any endpoint with the label "app=my-legacy-pet" is allowed to receive + connections from 10.3.9.1 + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + fromCIDRSet: + description: |- + FromCIDRSet is a list of IP blocks which the endpoint subject to the + rule is allowed to receive connections from in addition to FromEndpoints, + along with a list of subnets contained within their corresponding IP block + from which traffic should not be allowed. + This will match on the source IP address of incoming connections. Adding + a prefix into FromCIDR or into FromCIDRSet with no ExcludeCIDRs is + equivalent. Overlaps are allowed between FromCIDR and FromCIDRSet. + + Example: + Any endpoint with the label "app=my-legacy-pet" is allowed to receive + connections from 10.0.0.0/8 except from IPs in subnet 10.96.0.0/12. + items: + description: |- + CIDRRule is a rule that specifies a CIDR prefix to/from which outside + communication is allowed, along with an optional list of subnets within that + CIDR prefix to/from which outside communication is not allowed. + oneOf: + - properties: + cidr: {} + required: + - cidr + - properties: + cidrGroupRef: {} + required: + - cidrGroupRef + - properties: + cidrGroupSelector: {} + required: + - cidrGroupSelector + properties: + cidr: + description: CIDR is a CIDR prefix / IP Block. + format: cidr + type: string + cidrGroupRef: + description: |- + CIDRGroupRef is a reference to a CiliumCIDRGroup object. + A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to + the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + connections from. 
+ maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + cidrGroupSelector: + description: |- + CIDRGroupSelector selects CiliumCIDRGroups by their labels, + rather than by name. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + except: + description: |- + ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule + is not allowed to initiate connections to. 
These CIDR prefixes should be + contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not + supported yet. + These exceptions are only applied to the Cidr in this CIDRRule, and do not + apply to any other CIDR prefixes in any other CIDRRules. + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + type: object + type: array + fromEndpoints: + description: |- + FromEndpoints is a list of endpoints identified by an + EndpointSelector which are allowed to communicate with the endpoint + subject to the rule. + + Example: + Any endpoint with the label "role=backend" can be consumed by any + endpoint carrying the label "role=frontend". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. 
+ maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + fromEntities: + description: |- + FromEntities is a list of special entities which the endpoint subject + to the rule is allowed to receive connections from. Supported entities are + `world`, `cluster` and `host` + items: + description: |- + Entity specifies the class of receiver/sender endpoints that do not have + individual identities. Entities are used to describe "outside of cluster", + "host", etc. + enum: + - all + - world + - cluster + - host + - init + - ingress + - unmanaged + - remote-node + - health + - none + - kube-apiserver + type: string + type: array + fromGroups: + description: |- + FromGroups is a directive that allows the integration with multiple outside + providers. Currently, only AWS is supported, and the rule can select by + multiple sub directives: + + Example: + FromGroups: + - aws: + securityGroupsIds: + - 'sg-XXXXXXXXXXXXX' + items: + description: |- + Groups structure to store all kinds of new integrations that needs a new + derivative policy. 
+ properties: + aws: + description: AWSGroup is an structure that can be used + to whitelisting information from AWS integration + properties: + labels: + additionalProperties: + type: string + type: object + region: + type: string + securityGroupsIds: + items: + type: string + type: array + securityGroupsNames: + items: + type: string + type: array + type: object + type: object + type: array + fromNodes: + description: |- + FromNodes is a list of nodes identified by an + EndpointSelector which are allowed to communicate with the endpoint + subject to the rule. + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + fromRequires: + description: |- + FromRequires is a list of additional constraints which must be met + in order for the selected endpoints to be reachable. These + additional constraints do no by itself grant access privileges and + must always be accompanied with at least one matching FromEndpoints. + + Example: + Any Endpoint with the label "team=A" requires consuming endpoint + to also carry the label "team=A". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. 
+ maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + icmps: + description: |- + ICMPs is a list of ICMP rule identified by type number + which the endpoint subject to the rule is allowed to + receive connections on. + + Example: + Any endpoint with the label "app=httpd" can only accept incoming + type 8 ICMP connections. + items: + description: ICMPRule is a list of ICMP fields. + properties: + fields: + description: Fields is a list of ICMP fields. + items: + description: ICMPField is a ICMP field. + properties: + family: + default: IPv4 + description: |- + Family is a IP address version. + Currently, we support `IPv4` and `IPv6`. + `IPv4` is set as default. 
+ enum: + - IPv4 + - IPv6 + type: string + type: + anyOf: + - type: integer + - type: string + description: "Type is a ICMP-type.\nIt should be + an 8bit code (0-255), or it's CamelCase name (for + example, \"EchoReply\").\nAllowed ICMP types are:\n + \ Ipv4: EchoReply | DestinationUnreachable | + Redirect | Echo | EchoRequest |\n\t\t RouterAdvertisement + | RouterSelection | TimeExceeded | ParameterProblem + |\n\t\t\t Timestamp | TimestampReply | Photuris + | ExtendedEcho Request | ExtendedEcho Reply\n + \ Ipv6: DestinationUnreachable | PacketTooBig + | TimeExceeded | ParameterProblem |\n\t\t\t EchoRequest + | EchoReply | MulticastListenerQuery| MulticastListenerReport + |\n\t\t\t MulticastListenerDone | RouterSolicitation + | RouterAdvertisement | NeighborSolicitation |\n\t\t\t + NeighborAdvertisement | RedirectMessage | RouterRenumbering + | ICMPNodeInformationQuery |\n\t\t\t ICMPNodeInformationResponse + | InverseNeighborDiscoverySolicitation | InverseNeighborDiscoveryAdvertisement + |\n\t\t\t HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply + | MobilePrefixSolicitation |\n\t\t\t MobilePrefixAdvertisement + | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix + |\n\t\t\t ExtendedEchoRequest | ExtendedEchoReply" + pattern: ^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho + 
Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$ + x-kubernetes-int-or-string: true + required: + - type + type: object + maxItems: 40 + type: array + type: object + type: array + toPorts: + description: |- + ToPorts is a list of destination ports identified by port number and + protocol which the endpoint subject to the rule is allowed to + receive connections on. + + Example: + Any endpoint with the label "app=httpd" can only accept incoming + connections on port 80/tcp. + items: + description: |- + PortRule is a list of ports/protocol combinations with optional Layer 7 + rules which must be met. + properties: + listener: + description: |- + listener specifies the name of a custom Envoy listener to which this traffic should be + redirected to. + properties: + envoyConfig: + description: |- + EnvoyConfig is a reference to the CEC or CCEC resource in which + the listener is defined. + properties: + kind: + description: |- + Kind is the resource type being referred to. Defaults to CiliumEnvoyConfig or + CiliumClusterwideEnvoyConfig for CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy, + respectively. The only case this is currently explicitly needed is when referring to a + CiliumClusterwideEnvoyConfig from CiliumNetworkPolicy, as using a namespaced listener + from a cluster scoped policy is not allowed. 
+ enum: + - CiliumEnvoyConfig + - CiliumClusterwideEnvoyConfig + type: string + name: + description: |- + Name is the resource name of the CiliumEnvoyConfig or CiliumClusterwideEnvoyConfig where + the listener is defined in. + minLength: 1 + type: string + required: + - name + type: object + name: + description: Name is the name of the listener. + minLength: 1 + type: string + priority: + description: |- + Priority for this Listener that is used when multiple rules would apply different + listeners to a policy map entry. Behavior of this is implementation dependent. + maximum: 100 + minimum: 1 + type: integer + required: + - envoyConfig + - name + type: object + originatingTLS: + description: |- + OriginatingTLS is the TLS context for the connections originated by + the L7 proxy. For egress policy this specifies the client-side TLS + parameters for the upstream connection originating from the L7 proxy + to the remote destination. For ingress policy this specifies the + client-side TLS parameters for the connection from the L7 proxy to + the local endpoint. + properties: + certificate: + description: |- + Certificate is the file name or k8s secret item name for the certificate + chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the + item must exist. + type: string + privateKey: + description: |- + PrivateKey is the file name or k8s secret item name for the private key + matching the certificate chain. If omitted, 'tls.key' is assumed, if it + exists. If given, the item must exist. + type: string + secret: + description: |- + Secret is the secret that contains the certificates and private key for + the TLS context. + By default, Cilium will search in this secret for the following items: + - 'ca.crt' - Which represents the trusted CA to verify remote source. + - 'tls.crt' - Which represents the public key certificate. + - 'tls.key' - Which represents the private key matching the public key + certificate. 
+ properties: + name: + description: Name is the name of the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + trustedCA: + description: |- + TrustedCA is the file name or k8s secret item name for the trusted CA. + If omitted, 'ca.crt' is assumed, if it exists. If given, the item must + exist. + type: string + required: + - secret + type: object + ports: + description: Ports is a list of L4 port/protocol + items: + description: PortProtocol specifies an L4 port with + an optional transport protocol + properties: + endPort: + description: EndPort can only be an L4 port number. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + description: |- + Port can be an L4 port number, or a name in the form of "http" + or "http-8080". + pattern: ^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$ + type: string + protocol: + description: |- + Protocol is the L4 protocol. If omitted or empty, any protocol + matches. Accepted values: "TCP", "UDP", "SCTP", "ANY" + + Matching on ICMP is not supported. + + Named port specified for a container may narrow this down, but may not + contradict this. + enum: + - TCP + - UDP + - SCTP + - ANY + type: string + required: + - port + type: object + maxItems: 40 + type: array + rules: + description: |- + Rules is a list of additional port level rules which must be met in + order for the PortRule to allow the traffic. If omitted or empty, + no layer 7 rules are enforced. + oneOf: + - properties: + http: {} + required: + - http + - properties: + kafka: {} + required: + - kafka + - properties: + dns: {} + required: + - dns + - properties: + l7proto: {} + required: + - l7proto + properties: + dns: + description: DNS-specific rules. 
+ items: + description: PortRuleDNS is a list of allowed DNS + lookups. + oneOf: + - properties: + matchName: {} + required: + - matchName + - properties: + matchPattern: {} + required: + - matchPattern + properties: + matchName: + description: |- + MatchName matches literal DNS names. A trailing "." is automatically added + when missing. + maxLength: 255 + pattern: ^([-a-zA-Z0-9_]+[.]?)+$ + type: string + matchPattern: + description: |- + MatchPattern allows using wildcards to match DNS names. All wildcards are + case insensitive. The wildcards are: + - "*" matches 0 or more DNS valid characters, and may occur anywhere in + the pattern. As a special case a "*" as the leftmost character, without a + following "." matches all subdomains as well as the name to the right. + A trailing "." is automatically added when missing. + + Examples: + `*.cilium.io` matches subomains of cilium at that level + www.cilium.io and blog.cilium.io match, cilium.io and google.com do not + `*cilium.io` matches cilium.io and all subdomains ends with "cilium.io" + except those containing "." separator, subcilium.io and sub-cilium.io match, + www.cilium.io and blog.cilium.io does not + sub*.cilium.io matches subdomains of cilium where the subdomain component + begins with "sub" + sub.cilium.io and subdomain.cilium.io match, www.cilium.io, + blog.cilium.io, cilium.io and google.com do not + maxLength: 255 + pattern: ^([-a-zA-Z0-9_*]+[.]?)+$ + type: string + type: object + type: array + http: + description: HTTP specific rules. + items: + description: |- + PortRuleHTTP is a list of HTTP protocol constraints. All fields are + optional, if all fields are empty or missing, the rule does not have any + effect. + + All fields of this type are extended POSIX regex as defined by IEEE Std + 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) + matched against the path of an incoming request. 
Currently it can contain + characters disallowed from the conventional "path" part of a URL as defined + by RFC 3986. + properties: + headerMatches: + description: |- + HeaderMatches is a list of HTTP headers which must be + present and match against the given values. Mismatch field can be used + to specify what to do when there is no match. + items: + description: |- + HeaderMatch extends the HeaderValue for matching requirement of a + named header field against an immediate string, a secret value, or + a regex. If none of the optional fields is present, then the + header value is not matched, only presence of the header is enough. + properties: + mismatch: + description: |- + Mismatch identifies what to do in case there is no match. The default is + to drop the request. Otherwise the overall rule is still considered as + matching, but the mismatches are logged in the access log. + enum: + - LOG + - ADD + - DELETE + - REPLACE + type: string + name: + description: Name identifies the header. + minLength: 1 + type: string + secret: + description: |- + Secret refers to a secret that contains the value to be matched against. + The secret must only contain one entry. If the referred secret does not + exist, and there is no "Value" specified, the match will fail. + properties: + name: + description: Name is the name of the + secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + value: + description: |- + Value matches the exact value of the header. Can be specified either + alone or together with "Secret"; will be used as the header value if the + secret can not be found in the latter case. + type: string + required: + - name + type: object + type: array + headers: + description: |- + Headers is a list of HTTP headers which must be present in the + request. 
If omitted or empty, requests are allowed regardless of + headers present. + items: + type: string + type: array + host: + description: |- + Host is an extended POSIX regex matched against the host header of a + request. Examples: + + - foo.bar.com will match the host fooXbar.com or foo-bar.com + - foo\.bar\.com will only match the host foo.bar.com + + If omitted or empty, the value of the host header is ignored. + format: idn-hostname + type: string + method: + description: |- + Method is an extended POSIX regex matched against the method of a + request, e.g. "GET", "POST", "PUT", "PATCH", "DELETE", ... + + If omitted or empty, all methods are allowed. + type: string + path: + description: |- + Path is an extended POSIX regex matched against the path of a + request. Currently it can contain characters disallowed from the + conventional "path" part of a URL as defined by RFC 3986. + + If omitted or empty, all paths are all allowed. + type: string + type: object + type: array + kafka: + description: Kafka-specific rules. + items: + description: |- + PortRule is a list of Kafka protocol constraints. All fields are + optional, if all fields are empty or missing, the rule will match all + Kafka messages. + properties: + apiKey: + description: |- + APIKey is a case-insensitive string matched against the key of a + request, e.g. "produce", "fetch", "createtopic", "deletetopic", et al + Reference: https://kafka.apache.org/protocol#protocol_api_keys + + If omitted or empty, and if Role is not specified, then all keys are allowed. + type: string + apiVersion: + description: |- + APIVersion is the version matched against the api version of the + Kafka message. If set, it has to be a string representing a positive + integer. + + If omitted or empty, all versions are allowed. + type: string + clientID: + description: |- + ClientID is the client identifier as provided in the request. 
+ + From Kafka protocol documentation: + This is a user supplied identifier for the client application. The + user can use any identifier they like and it will be used when + logging errors, monitoring aggregates, etc. For example, one might + want to monitor not just the requests per second overall, but the + number coming from each client application (each of which could + reside on multiple servers). This id acts as a logical grouping + across all requests from a particular client. + + If omitted or empty, all client identifiers are allowed. + type: string + role: + description: |- + Role is a case-insensitive string and describes a group of API keys + necessary to perform certain higher-level Kafka operations such as "produce" + or "consume". A Role automatically expands into all APIKeys required + to perform the specified higher-level operation. + + The following values are supported: + - "produce": Allow producing to the topics specified in the rule + - "consume": Allow consuming from the topics specified in the rule + + This field is incompatible with the APIKey field, i.e APIKey and Role + cannot both be specified in the same rule. + + If omitted or empty, and if APIKey is not specified, then all keys are + allowed. + enum: + - produce + - consume + type: string + topic: + description: |- + Topic is the topic name contained in the message. If a Kafka request + contains multiple topics, then all topics must be allowed or the + message will be rejected. + + This constraint is ignored if the matched request message type + doesn't contain any topic. Maximum size of Topic can be 249 + characters as per recent Kafka spec and allowed characters are + a-z, A-Z, 0-9, -, . and _. + + Older Kafka versions had longer topic lengths of 255, but in Kafka 0.10 + version the length was changed from 255 to 249. For compatibility + reasons we are using 255. + + If omitted or empty, all topics are allowed. 
+ maxLength: 255 + type: string + type: object + type: array + l7: + description: Key-value pair rules. + items: + additionalProperties: + type: string + description: |- + PortRuleL7 is a list of key-value pairs interpreted by a L7 protocol as + protocol constraints. All fields are optional, if all fields are empty or + missing, the rule does not have any effect. + type: object + type: array + l7proto: + description: Name of the L7 protocol for which the + Key-value pair rules apply. + type: string + type: object + serverNames: + description: |- + ServerNames is a list of allowed TLS SNI values. If not empty, then + TLS must be present and one of the provided SNIs must be indicated in the + TLS handshake. + items: + type: string + type: array + terminatingTLS: + description: |- + TerminatingTLS is the TLS context for the connection terminated by + the L7 proxy. For egress policy this specifies the server-side TLS + parameters to be applied on the connections originated from the local + endpoint and terminated by the L7 proxy. For ingress policy this specifies + the server-side TLS parameters to be applied on the connections + originated from a remote source and terminated by the L7 proxy. + properties: + certificate: + description: |- + Certificate is the file name or k8s secret item name for the certificate + chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the + item must exist. + type: string + privateKey: + description: |- + PrivateKey is the file name or k8s secret item name for the private key + matching the certificate chain. If omitted, 'tls.key' is assumed, if it + exists. If given, the item must exist. + type: string + secret: + description: |- + Secret is the secret that contains the certificates and private key for + the TLS context. + By default, Cilium will search in this secret for the following items: + - 'ca.crt' - Which represents the trusted CA to verify remote source. + - 'tls.crt' - Which represents the public key certificate. 
+ - 'tls.key' - Which represents the private key matching the public key + certificate. + properties: + name: + description: Name is the name of the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + trustedCA: + description: |- + TrustedCA is the file name or k8s secret item name for the trusted CA. + If omitted, 'ca.crt' is assumed, if it exists. If given, the item must + exist. + type: string + required: + - secret + type: object + type: object + type: array + type: object + type: array + ingressDeny: + description: |- + IngressDeny is a list of IngressDenyRule which are enforced at ingress. + Any rule inserted here will be denied regardless of the allowed ingress + rules in the 'ingress' field. + If omitted or empty, this rule does not apply at ingress. + items: + description: |- + IngressDenyRule contains all rule types which can be applied at ingress, + i.e. network traffic that originates outside of the endpoint and + is entering the endpoint selected by the endpointSelector. + + - All members of this structure are optional. If omitted or empty, the + member will have no effect on the rule. + + - If multiple members are set, all of them need to match in order for + the rule to take effect. The exception to this rule is FromRequires field; + the effects of any Requires field in any rule will apply to all other + rules as well. + + - FromEndpoints, FromCIDR, FromCIDRSet, FromGroups and FromEntities are mutually + exclusive. Only one of these members may be present within an individual + rule. + properties: + fromCIDR: + description: |- + FromCIDR is a list of IP blocks which the endpoint subject to the + rule is allowed to receive connections from. Only connections which + do *not* originate from the cluster or from the local host are subject + to CIDR rules. 
In order to allow in-cluster connectivity, use the + FromEndpoints field. This will match on the source IP address of + incoming connections. Adding a prefix into FromCIDR or into + FromCIDRSet with no ExcludeCIDRs is equivalent. Overlaps are + allowed between FromCIDR and FromCIDRSet. + + Example: + Any endpoint with the label "app=my-legacy-pet" is allowed to receive + connections from 10.3.9.1 + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + fromCIDRSet: + description: |- + FromCIDRSet is a list of IP blocks which the endpoint subject to the + rule is allowed to receive connections from in addition to FromEndpoints, + along with a list of subnets contained within their corresponding IP block + from which traffic should not be allowed. + This will match on the source IP address of incoming connections. Adding + a prefix into FromCIDR or into FromCIDRSet with no ExcludeCIDRs is + equivalent. Overlaps are allowed between FromCIDR and FromCIDRSet. + + Example: + Any endpoint with the label "app=my-legacy-pet" is allowed to receive + connections from 10.0.0.0/8 except from IPs in subnet 10.96.0.0/12. + items: + description: |- + CIDRRule is a rule that specifies a CIDR prefix to/from which outside + communication is allowed, along with an optional list of subnets within that + CIDR prefix to/from which outside communication is not allowed. + oneOf: + - properties: + cidr: {} + required: + - cidr + - properties: + cidrGroupRef: {} + required: + - cidrGroupRef + - properties: + cidrGroupSelector: {} + required: + - cidrGroupSelector + properties: + cidr: + description: CIDR is a CIDR prefix / IP Block. + format: cidr + type: string + cidrGroupRef: + description: |- + CIDRGroupRef is a reference to a CiliumCIDRGroup object. 
+ A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to + the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + connections from. + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + cidrGroupSelector: + description: |- + CIDRGroupSelector selects CiliumCIDRGroups by their labels, + rather than by name. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + except: + description: |- + ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule + is not allowed to initiate connections to. These CIDR prefixes should be + contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not + supported yet. + These exceptions are only applied to the Cidr in this CIDRRule, and do not + apply to any other CIDR prefixes in any other CIDRRules. + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + type: object + type: array + fromEndpoints: + description: |- + FromEndpoints is a list of endpoints identified by an + EndpointSelector which are allowed to communicate with the endpoint + subject to the rule. + + Example: + Any endpoint with the label "role=backend" can be consumed by any + endpoint carrying the label "role=frontend". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + fromEntities: + description: |- + FromEntities is a list of special entities which the endpoint subject + to the rule is allowed to receive connections from. Supported entities are + `world`, `cluster` and `host` + items: + description: |- + Entity specifies the class of receiver/sender endpoints that do not have + individual identities. Entities are used to describe "outside of cluster", + "host", etc. + enum: + - all + - world + - cluster + - host + - init + - ingress + - unmanaged + - remote-node + - health + - none + - kube-apiserver + type: string + type: array + fromGroups: + description: |- + FromGroups is a directive that allows the integration with multiple outside + providers. Currently, only AWS is supported, and the rule can select by + multiple sub directives: + + Example: + FromGroups: + - aws: + securityGroupsIds: + - 'sg-XXXXXXXXXXXXX' + items: + description: |- + Groups structure to store all kinds of new integrations that needs a new + derivative policy. 
+ properties: + aws: + description: AWSGroup is an structure that can be used + to whitelisting information from AWS integration + properties: + labels: + additionalProperties: + type: string + type: object + region: + type: string + securityGroupsIds: + items: + type: string + type: array + securityGroupsNames: + items: + type: string + type: array + type: object + type: object + type: array + fromNodes: + description: |- + FromNodes is a list of nodes identified by an + EndpointSelector which are allowed to communicate with the endpoint + subject to the rule. + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + fromRequires: + description: |- + FromRequires is a list of additional constraints which must be met + in order for the selected endpoints to be reachable. These + additional constraints do no by itself grant access privileges and + must always be accompanied with at least one matching FromEndpoints. + + Example: + Any Endpoint with the label "team=A" requires consuming endpoint + to also carry the label "team=A". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. 
+ maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + icmps: + description: |- + ICMPs is a list of ICMP rule identified by type number + which the endpoint subject to the rule is not allowed to + receive connections on. + + Example: + Any endpoint with the label "app=httpd" can not accept incoming + type 8 ICMP connections. + items: + description: ICMPRule is a list of ICMP fields. + properties: + fields: + description: Fields is a list of ICMP fields. + items: + description: ICMPField is a ICMP field. + properties: + family: + default: IPv4 + description: |- + Family is a IP address version. + Currently, we support `IPv4` and `IPv6`. + `IPv4` is set as default. 
+ enum: + - IPv4 + - IPv6 + type: string + type: + anyOf: + - type: integer + - type: string + description: "Type is a ICMP-type.\nIt should be + an 8bit code (0-255), or it's CamelCase name (for + example, \"EchoReply\").\nAllowed ICMP types are:\n + \ Ipv4: EchoReply | DestinationUnreachable | + Redirect | Echo | EchoRequest |\n\t\t RouterAdvertisement + | RouterSelection | TimeExceeded | ParameterProblem + |\n\t\t\t Timestamp | TimestampReply | Photuris + | ExtendedEcho Request | ExtendedEcho Reply\n + \ Ipv6: DestinationUnreachable | PacketTooBig + | TimeExceeded | ParameterProblem |\n\t\t\t EchoRequest + | EchoReply | MulticastListenerQuery| MulticastListenerReport + |\n\t\t\t MulticastListenerDone | RouterSolicitation + | RouterAdvertisement | NeighborSolicitation |\n\t\t\t + NeighborAdvertisement | RedirectMessage | RouterRenumbering + | ICMPNodeInformationQuery |\n\t\t\t ICMPNodeInformationResponse + | InverseNeighborDiscoverySolicitation | InverseNeighborDiscoveryAdvertisement + |\n\t\t\t HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply + | MobilePrefixSolicitation |\n\t\t\t MobilePrefixAdvertisement + | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix + |\n\t\t\t ExtendedEchoRequest | ExtendedEchoReply" + pattern: ^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho + 
Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$ + x-kubernetes-int-or-string: true + required: + - type + type: object + maxItems: 40 + type: array + type: object + type: array + toPorts: + description: |- + ToPorts is a list of destination ports identified by port number and + protocol which the endpoint subject to the rule is not allowed to + receive connections on. + + Example: + Any endpoint with the label "app=httpd" can not accept incoming + connections on port 80/tcp. + items: + description: |- + PortDenyRule is a list of ports/protocol that should be used for deny + policies. This structure lacks the L7Rules since it's not supported in deny + policies. + properties: + ports: + description: Ports is a list of L4 port/protocol + items: + description: PortProtocol specifies an L4 port with + an optional transport protocol + properties: + endPort: + description: EndPort can only be an L4 port number. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + description: |- + Port can be an L4 port number, or a name in the form of "http" + or "http-8080". + pattern: ^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$ + type: string + protocol: + description: |- + Protocol is the L4 protocol. If omitted or empty, any protocol + matches. Accepted values: "TCP", "UDP", "SCTP", "ANY" + + Matching on ICMP is not supported. 
+ + Named port specified for a container may narrow this down, but may not + contradict this. + enum: + - TCP + - UDP + - SCTP + - ANY + type: string + required: + - port + type: object + type: array + type: object + type: array + type: object + type: array + labels: + description: |- + Labels is a list of optional strings which can be used to + re-identify the rule or to store metadata. It is possible to lookup + or delete strings based on labels. Labels are not required to be + unique, multiple rules can have overlapping or identical labels. + items: + description: Label is the Cilium's representation of a container + label. + properties: + key: + type: string + source: + description: 'Source can be one of the above values (e.g.: LabelSourceContainer).' + type: string + value: + type: string + required: + - key + type: object + type: array + nodeSelector: + description: |- + NodeSelector selects all nodes which should be subject to this rule. + EndpointSelector and NodeSelector cannot be both empty and are mutually + exclusive. Can only be used in CiliumClusterwideNetworkPolicies. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from the + MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + specs: + description: Specs is a list of desired Cilium specific rule specification. + items: + anyOf: + - properties: + ingress: {} + required: + - ingress + - properties: + ingressDeny: {} + required: + - ingressDeny + - properties: + egress: {} + required: + - egress + - properties: + egressDeny: {} + required: + - egressDeny + description: |- + Rule is a policy rule which must be applied to all endpoints which match the + labels contained in the endpointSelector + + Each rule is split into an ingress section which contains all rules + applicable at ingress, and an egress section applicable at egress. For rule + types such as `L4Rule` and `CIDR` which can be applied at both ingress and + egress, both ingress and egress side have to either specifically allow the + connection or one side has to be omitted. + + Either ingress, egress, or both can be provided. If both ingress and egress + are omitted, the rule has no effect. 
+ oneOf: + - properties: + endpointSelector: {} + required: + - endpointSelector + - properties: + nodeSelector: {} + required: + - nodeSelector + properties: + description: + description: |- + Description is a free form string, it can be used by the creator of + the rule to store human readable explanation of the purpose of this + rule. Rules cannot be identified by comment. + type: string + egress: + description: |- + Egress is a list of EgressRule which are enforced at egress. + If omitted or empty, this rule does not apply at egress. + items: + description: |- + EgressRule contains all rule types which can be applied at egress, i.e. + network traffic that originates inside the endpoint and exits the endpoint + selected by the endpointSelector. + + - All members of this structure are optional. If omitted or empty, the + member will have no effect on the rule. + + - If multiple members of the structure are specified, then all members + must match in order for the rule to take effect. The exception to this + rule is the ToRequires member; the effects of any Requires field in any + rule will apply to all other rules as well. + + - ToEndpoints, ToCIDR, ToCIDRSet, ToEntities, ToServices and ToGroups are + mutually exclusive. Only one of these members may be present within an + individual rule. + properties: + authentication: + description: Authentication is the required authentication + type for the allowed traffic, if any. + properties: + mode: + description: Mode is the required authentication mode + for the allowed traffic, if any. + enum: + - disabled + - required + - test-always-fail + type: string + required: + - mode + type: object + icmps: + description: |- + ICMPs is a list of ICMP rule identified by type number + which the endpoint subject to the rule is allowed to connect to. + + Example: + Any endpoint with the label "app=httpd" is allowed to initiate + type 8 ICMP connections. + items: + description: ICMPRule is a list of ICMP fields. 
+ properties: + fields: + description: Fields is a list of ICMP fields. + items: + description: ICMPField is a ICMP field. + properties: + family: + default: IPv4 + description: |- + Family is a IP address version. + Currently, we support `IPv4` and `IPv6`. + `IPv4` is set as default. + enum: + - IPv4 + - IPv6 + type: string + type: + anyOf: + - type: integer + - type: string + description: "Type is a ICMP-type.\nIt should + be an 8bit code (0-255), or it's CamelCase name + (for example, \"EchoReply\").\nAllowed ICMP + types are:\n Ipv4: EchoReply | DestinationUnreachable + | Redirect | Echo | EchoRequest |\n\t\t RouterAdvertisement + | RouterSelection | TimeExceeded | ParameterProblem + |\n\t\t\t Timestamp | TimestampReply | Photuris + | ExtendedEcho Request | ExtendedEcho Reply\n + \ Ipv6: DestinationUnreachable | PacketTooBig + | TimeExceeded | ParameterProblem |\n\t\t\t + EchoRequest | EchoReply | MulticastListenerQuery| + MulticastListenerReport |\n\t\t\t MulticastListenerDone + | RouterSolicitation | RouterAdvertisement | + NeighborSolicitation |\n\t\t\t NeighborAdvertisement + | RedirectMessage | RouterRenumbering | ICMPNodeInformationQuery + |\n\t\t\t ICMPNodeInformationResponse | InverseNeighborDiscoverySolicitation + | InverseNeighborDiscoveryAdvertisement |\n\t\t\t + HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply + | MobilePrefixSolicitation |\n\t\t\t MobilePrefixAdvertisement + | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix + |\n\t\t\t ExtendedEchoRequest | ExtendedEchoReply" + pattern: ^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho + 
Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$ + x-kubernetes-int-or-string: true + required: + - type + type: object + maxItems: 40 + type: array + type: object + type: array + toCIDR: + description: |- + ToCIDR is a list of IP blocks which the endpoint subject to the rule + is allowed to initiate connections. Only connections destined for + outside of the cluster and not targeting the host will be subject + to CIDR rules. This will match on the destination IP address of + outgoing connections. Adding a prefix into ToCIDR or into ToCIDRSet + with no ExcludeCIDRs is equivalent. Overlaps are allowed between + ToCIDR and ToCIDRSet. + + Example: + Any endpoint with the label "app=database-proxy" is allowed to + initiate connections to 10.2.3.0/24 + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + toCIDRSet: + description: |- + ToCIDRSet is a list of IP blocks which the endpoint subject to the rule + is allowed to initiate connections to in addition to connections + which are allowed via ToEndpoints, along with a list of subnets contained + within their corresponding IP block to which traffic should not be + allowed. This will match on the destination IP address of outgoing + connections. Adding a prefix into ToCIDR or into ToCIDRSet with no + ExcludeCIDRs is equivalent. Overlaps are allowed between ToCIDR and + ToCIDRSet. 
+ + Example: + Any endpoint with the label "app=database-proxy" is allowed to + initiate connections to 10.2.3.0/24 except from IPs in subnet 10.2.3.0/28. + items: + description: |- + CIDRRule is a rule that specifies a CIDR prefix to/from which outside + communication is allowed, along with an optional list of subnets within that + CIDR prefix to/from which outside communication is not allowed. + oneOf: + - properties: + cidr: {} + required: + - cidr + - properties: + cidrGroupRef: {} + required: + - cidrGroupRef + - properties: + cidrGroupSelector: {} + required: + - cidrGroupSelector + properties: + cidr: + description: CIDR is a CIDR prefix / IP Block. + format: cidr + type: string + cidrGroupRef: + description: |- + CIDRGroupRef is a reference to a CiliumCIDRGroup object. + A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to + the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + connections from. + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + cidrGroupSelector: + description: |- + CIDRGroupSelector selects CiliumCIDRGroups by their labels, + rather than by name. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + except: + description: |- + ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule + is not allowed to initiate connections to. These CIDR prefixes should be + contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not + supported yet. + These exceptions are only applied to the Cidr in this CIDRRule, and do not + apply to any other CIDR prefixes in any other CIDRRules. + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + type: object + type: array + toEndpoints: + description: |- + ToEndpoints is a list of endpoints identified by an EndpointSelector to + which the endpoints subject to the rule are allowed to communicate. + + Example: + Any endpoint with the label "role=frontend" can communicate with any + endpoint carrying the label "role=backend". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toEntities: + description: |- + ToEntities is a list of special entities to which the endpoint subject + to the rule is allowed to initiate connections. Supported entities are + `world`, `cluster`,`host`,`remote-node`,`kube-apiserver`, `init`, + `health`,`unmanaged` and `all`. + items: + description: |- + Entity specifies the class of receiver/sender endpoints that do not have + individual identities. Entities are used to describe "outside of cluster", + "host", etc. 
+ enum: + - all + - world + - cluster + - host + - init + - ingress + - unmanaged + - remote-node + - health + - none + - kube-apiserver + type: string + type: array + toFQDNs: + description: |- + ToFQDN allows whitelisting DNS names in place of IPs. The IPs that result + from DNS resolution of `ToFQDN.MatchName`s are added to the same + EgressRule object as ToCIDRSet entries, and behave accordingly. Any L4 and + L7 rules within this EgressRule will also apply to these IPs. + The DNS -> IP mapping is re-resolved periodically from within the + cilium-agent, and the IPs in the DNS response are effected in the policy + for selected pods as-is (i.e. the list of IPs is not modified in any way). + Note: An explicit rule to allow for DNS traffic is needed for the pods, as + ToFQDN counts as an egress rule and will enforce egress policy when + PolicyEnforcment=default. + Note: If the resolved IPs are IPs within the kubernetes cluster, the + ToFQDN rule will not apply to that IP. + Note: ToFQDN cannot occur in the same policy as other To* rules. + items: + oneOf: + - properties: + matchName: {} + required: + - matchName + - properties: + matchPattern: {} + required: + - matchPattern + properties: + matchName: + description: |- + MatchName matches literal DNS names. A trailing "." is automatically added + when missing. + maxLength: 255 + pattern: ^([-a-zA-Z0-9_]+[.]?)+$ + type: string + matchPattern: + description: |- + MatchPattern allows using wildcards to match DNS names. All wildcards are + case insensitive. The wildcards are: + - "*" matches 0 or more DNS valid characters, and may occur anywhere in + the pattern. As a special case a "*" as the leftmost character, without a + following "." matches all subdomains as well as the name to the right. + A trailing "." is automatically added when missing. 
+ + Examples: + `*.cilium.io` matches subomains of cilium at that level + www.cilium.io and blog.cilium.io match, cilium.io and google.com do not + `*cilium.io` matches cilium.io and all subdomains ends with "cilium.io" + except those containing "." separator, subcilium.io and sub-cilium.io match, + www.cilium.io and blog.cilium.io does not + sub*.cilium.io matches subdomains of cilium where the subdomain component + begins with "sub" + sub.cilium.io and subdomain.cilium.io match, www.cilium.io, + blog.cilium.io, cilium.io and google.com do not + maxLength: 255 + pattern: ^([-a-zA-Z0-9_*]+[.]?)+$ + type: string + type: object + type: array + toGroups: + description: |- + ToGroups is a directive that allows the integration with multiple outside + providers. Currently, only AWS is supported, and the rule can select by + multiple sub directives: + + Example: + toGroups: + - aws: + securityGroupsIds: + - 'sg-XXXXXXXXXXXXX' + items: + description: |- + Groups structure to store all kinds of new integrations that needs a new + derivative policy. + properties: + aws: + description: AWSGroup is an structure that can be used + to whitelisting information from AWS integration + properties: + labels: + additionalProperties: + type: string + type: object + region: + type: string + securityGroupsIds: + items: + type: string + type: array + securityGroupsNames: + items: + type: string + type: array + type: object + type: object + type: array + toNodes: + description: |- + ToNodes is a list of nodes identified by an + EndpointSelector to which endpoints subject to the rule is allowed to communicate. + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toPorts: + description: |- + ToPorts is a list of destination ports identified by port number and + protocol which the endpoint subject to the rule is allowed to + connect to. + + Example: + Any endpoint with the label "role=frontend" is allowed to initiate + connections to destination port 8080/tcp + items: + description: |- + PortRule is a list of ports/protocol combinations with optional Layer 7 + rules which must be met. + properties: + listener: + description: |- + listener specifies the name of a custom Envoy listener to which this traffic should be + redirected to. 
+ properties: + envoyConfig: + description: |- + EnvoyConfig is a reference to the CEC or CCEC resource in which + the listener is defined. + properties: + kind: + description: |- + Kind is the resource type being referred to. Defaults to CiliumEnvoyConfig or + CiliumClusterwideEnvoyConfig for CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy, + respectively. The only case this is currently explicitly needed is when referring to a + CiliumClusterwideEnvoyConfig from CiliumNetworkPolicy, as using a namespaced listener + from a cluster scoped policy is not allowed. + enum: + - CiliumEnvoyConfig + - CiliumClusterwideEnvoyConfig + type: string + name: + description: |- + Name is the resource name of the CiliumEnvoyConfig or CiliumClusterwideEnvoyConfig where + the listener is defined in. + minLength: 1 + type: string + required: + - name + type: object + name: + description: Name is the name of the listener. + minLength: 1 + type: string + priority: + description: |- + Priority for this Listener that is used when multiple rules would apply different + listeners to a policy map entry. Behavior of this is implementation dependent. + maximum: 100 + minimum: 1 + type: integer + required: + - envoyConfig + - name + type: object + originatingTLS: + description: |- + OriginatingTLS is the TLS context for the connections originated by + the L7 proxy. For egress policy this specifies the client-side TLS + parameters for the upstream connection originating from the L7 proxy + to the remote destination. For ingress policy this specifies the + client-side TLS parameters for the connection from the L7 proxy to + the local endpoint. + properties: + certificate: + description: |- + Certificate is the file name or k8s secret item name for the certificate + chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the + item must exist. 
+ type: string + privateKey: + description: |- + PrivateKey is the file name or k8s secret item name for the private key + matching the certificate chain. If omitted, 'tls.key' is assumed, if it + exists. If given, the item must exist. + type: string + secret: + description: |- + Secret is the secret that contains the certificates and private key for + the TLS context. + By default, Cilium will search in this secret for the following items: + - 'ca.crt' - Which represents the trusted CA to verify remote source. + - 'tls.crt' - Which represents the public key certificate. + - 'tls.key' - Which represents the private key matching the public key + certificate. + properties: + name: + description: Name is the name of the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + trustedCA: + description: |- + TrustedCA is the file name or k8s secret item name for the trusted CA. + If omitted, 'ca.crt' is assumed, if it exists. If given, the item must + exist. + type: string + required: + - secret + type: object + ports: + description: Ports is a list of L4 port/protocol + items: + description: PortProtocol specifies an L4 port with + an optional transport protocol + properties: + endPort: + description: EndPort can only be an L4 port number. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + description: |- + Port can be an L4 port number, or a name in the form of "http" + or "http-8080". + pattern: ^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$ + type: string + protocol: + description: |- + Protocol is the L4 protocol. If omitted or empty, any protocol + matches. Accepted values: "TCP", "UDP", "SCTP", "ANY" + + Matching on ICMP is not supported. 
+ + Named port specified for a container may narrow this down, but may not + contradict this. + enum: + - TCP + - UDP + - SCTP + - ANY + type: string + required: + - port + type: object + maxItems: 40 + type: array + rules: + description: |- + Rules is a list of additional port level rules which must be met in + order for the PortRule to allow the traffic. If omitted or empty, + no layer 7 rules are enforced. + oneOf: + - properties: + http: {} + required: + - http + - properties: + kafka: {} + required: + - kafka + - properties: + dns: {} + required: + - dns + - properties: + l7proto: {} + required: + - l7proto + properties: + dns: + description: DNS-specific rules. + items: + description: PortRuleDNS is a list of allowed + DNS lookups. + oneOf: + - properties: + matchName: {} + required: + - matchName + - properties: + matchPattern: {} + required: + - matchPattern + properties: + matchName: + description: |- + MatchName matches literal DNS names. A trailing "." is automatically added + when missing. + maxLength: 255 + pattern: ^([-a-zA-Z0-9_]+[.]?)+$ + type: string + matchPattern: + description: |- + MatchPattern allows using wildcards to match DNS names. All wildcards are + case insensitive. The wildcards are: + - "*" matches 0 or more DNS valid characters, and may occur anywhere in + the pattern. As a special case a "*" as the leftmost character, without a + following "." matches all subdomains as well as the name to the right. + A trailing "." is automatically added when missing. + + Examples: + `*.cilium.io` matches subomains of cilium at that level + www.cilium.io and blog.cilium.io match, cilium.io and google.com do not + `*cilium.io` matches cilium.io and all subdomains ends with "cilium.io" + except those containing "." 
separator, subcilium.io and sub-cilium.io match, + www.cilium.io and blog.cilium.io does not + sub*.cilium.io matches subdomains of cilium where the subdomain component + begins with "sub" + sub.cilium.io and subdomain.cilium.io match, www.cilium.io, + blog.cilium.io, cilium.io and google.com do not + maxLength: 255 + pattern: ^([-a-zA-Z0-9_*]+[.]?)+$ + type: string + type: object + type: array + http: + description: HTTP specific rules. + items: + description: |- + PortRuleHTTP is a list of HTTP protocol constraints. All fields are + optional, if all fields are empty or missing, the rule does not have any + effect. + + All fields of this type are extended POSIX regex as defined by IEEE Std + 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) + matched against the path of an incoming request. Currently it can contain + characters disallowed from the conventional "path" part of a URL as defined + by RFC 3986. + properties: + headerMatches: + description: |- + HeaderMatches is a list of HTTP headers which must be + present and match against the given values. Mismatch field can be used + to specify what to do when there is no match. + items: + description: |- + HeaderMatch extends the HeaderValue for matching requirement of a + named header field against an immediate string, a secret value, or + a regex. If none of the optional fields is present, then the + header value is not matched, only presence of the header is enough. + properties: + mismatch: + description: |- + Mismatch identifies what to do in case there is no match. The default is + to drop the request. Otherwise the overall rule is still considered as + matching, but the mismatches are logged in the access log. + enum: + - LOG + - ADD + - DELETE + - REPLACE + type: string + name: + description: Name identifies the header. + minLength: 1 + type: string + secret: + description: |- + Secret refers to a secret that contains the value to be matched against. 
+ The secret must only contain one entry. If the referred secret does not + exist, and there is no "Value" specified, the match will fail. + properties: + name: + description: Name is the name of + the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + value: + description: |- + Value matches the exact value of the header. Can be specified either + alone or together with "Secret"; will be used as the header value if the + secret can not be found in the latter case. + type: string + required: + - name + type: object + type: array + headers: + description: |- + Headers is a list of HTTP headers which must be present in the + request. If omitted or empty, requests are allowed regardless of + headers present. + items: + type: string + type: array + host: + description: |- + Host is an extended POSIX regex matched against the host header of a + request. Examples: + + - foo.bar.com will match the host fooXbar.com or foo-bar.com + - foo\.bar\.com will only match the host foo.bar.com + + If omitted or empty, the value of the host header is ignored. + format: idn-hostname + type: string + method: + description: |- + Method is an extended POSIX regex matched against the method of a + request, e.g. "GET", "POST", "PUT", "PATCH", "DELETE", ... + + If omitted or empty, all methods are allowed. + type: string + path: + description: |- + Path is an extended POSIX regex matched against the path of a + request. Currently it can contain characters disallowed from the + conventional "path" part of a URL as defined by RFC 3986. + + If omitted or empty, all paths are all allowed. + type: string + type: object + type: array + kafka: + description: Kafka-specific rules. + items: + description: |- + PortRule is a list of Kafka protocol constraints. 
All fields are + optional, if all fields are empty or missing, the rule will match all + Kafka messages. + properties: + apiKey: + description: |- + APIKey is a case-insensitive string matched against the key of a + request, e.g. "produce", "fetch", "createtopic", "deletetopic", et al + Reference: https://kafka.apache.org/protocol#protocol_api_keys + + If omitted or empty, and if Role is not specified, then all keys are allowed. + type: string + apiVersion: + description: |- + APIVersion is the version matched against the api version of the + Kafka message. If set, it has to be a string representing a positive + integer. + + If omitted or empty, all versions are allowed. + type: string + clientID: + description: |- + ClientID is the client identifier as provided in the request. + + From Kafka protocol documentation: + This is a user supplied identifier for the client application. The + user can use any identifier they like and it will be used when + logging errors, monitoring aggregates, etc. For example, one might + want to monitor not just the requests per second overall, but the + number coming from each client application (each of which could + reside on multiple servers). This id acts as a logical grouping + across all requests from a particular client. + + If omitted or empty, all client identifiers are allowed. + type: string + role: + description: |- + Role is a case-insensitive string and describes a group of API keys + necessary to perform certain higher-level Kafka operations such as "produce" + or "consume". A Role automatically expands into all APIKeys required + to perform the specified higher-level operation. + + The following values are supported: + - "produce": Allow producing to the topics specified in the rule + - "consume": Allow consuming from the topics specified in the rule + + This field is incompatible with the APIKey field, i.e APIKey and Role + cannot both be specified in the same rule. 
+ + If omitted or empty, and if APIKey is not specified, then all keys are + allowed. + enum: + - produce + - consume + type: string + topic: + description: |- + Topic is the topic name contained in the message. If a Kafka request + contains multiple topics, then all topics must be allowed or the + message will be rejected. + + This constraint is ignored if the matched request message type + doesn't contain any topic. Maximum size of Topic can be 249 + characters as per recent Kafka spec and allowed characters are + a-z, A-Z, 0-9, -, . and _. + + Older Kafka versions had longer topic lengths of 255, but in Kafka 0.10 + version the length was changed from 255 to 249. For compatibility + reasons we are using 255. + + If omitted or empty, all topics are allowed. + maxLength: 255 + type: string + type: object + type: array + l7: + description: Key-value pair rules. + items: + additionalProperties: + type: string + description: |- + PortRuleL7 is a list of key-value pairs interpreted by a L7 protocol as + protocol constraints. All fields are optional, if all fields are empty or + missing, the rule does not have any effect. + type: object + type: array + l7proto: + description: Name of the L7 protocol for which the + Key-value pair rules apply. + type: string + type: object + serverNames: + description: |- + ServerNames is a list of allowed TLS SNI values. If not empty, then + TLS must be present and one of the provided SNIs must be indicated in the + TLS handshake. + items: + type: string + type: array + terminatingTLS: + description: |- + TerminatingTLS is the TLS context for the connection terminated by + the L7 proxy. For egress policy this specifies the server-side TLS + parameters to be applied on the connections originated from the local + endpoint and terminated by the L7 proxy. For ingress policy this specifies + the server-side TLS parameters to be applied on the connections + originated from a remote source and terminated by the L7 proxy. 
+ properties: + certificate: + description: |- + Certificate is the file name or k8s secret item name for the certificate + chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the + item must exist. + type: string + privateKey: + description: |- + PrivateKey is the file name or k8s secret item name for the private key + matching the certificate chain. If omitted, 'tls.key' is assumed, if it + exists. If given, the item must exist. + type: string + secret: + description: |- + Secret is the secret that contains the certificates and private key for + the TLS context. + By default, Cilium will search in this secret for the following items: + - 'ca.crt' - Which represents the trusted CA to verify remote source. + - 'tls.crt' - Which represents the public key certificate. + - 'tls.key' - Which represents the private key matching the public key + certificate. + properties: + name: + description: Name is the name of the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + trustedCA: + description: |- + TrustedCA is the file name or k8s secret item name for the trusted CA. + If omitted, 'ca.crt' is assumed, if it exists. If given, the item must + exist. + type: string + required: + - secret + type: object + type: object + type: array + toRequires: + description: |- + ToRequires is a list of additional constraints which must be met + in order for the selected endpoints to be able to connect to other + endpoints. These additional constraints do no by itself grant access + privileges and must always be accompanied with at least one matching + ToEndpoints. + + Example: + Any Endpoint with the label "team=A" requires any endpoint to which it + communicates to also carry the label "team=A". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toServices: + description: |- + ToServices is a list of services to which the endpoint subject + to the rule is allowed to initiate connections. + Currently Cilium only supports toServices for K8s services. + items: + description: |- + Service selects policy targets that are bundled as part of a + logical load-balanced service. 
+ + Currently only Kubernetes-based Services are supported. + properties: + k8sService: + description: K8sService selects service by name and + namespace pair + properties: + namespace: + type: string + serviceName: + type: string + type: object + k8sServiceSelector: + description: K8sServiceSelector selects services by + k8s labels and namespace + properties: + namespace: + type: string + selector: + description: ServiceSelector is a label selector + for k8s services + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the + value from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - selector + type: object + type: object + type: array + type: object + type: array + egressDeny: + description: |- + EgressDeny is a list of EgressDenyRule which are enforced at egress. + Any rule inserted here will be denied regardless of the allowed egress + rules in the 'egress' field. + If omitted or empty, this rule does not apply at egress. + items: + description: |- + EgressDenyRule contains all rule types which can be applied at egress, i.e. + network traffic that originates inside the endpoint and exits the endpoint + selected by the endpointSelector. + + - All members of this structure are optional. If omitted or empty, the + member will have no effect on the rule. + + - If multiple members of the structure are specified, then all members + must match in order for the rule to take effect. The exception to this + rule is the ToRequires member; the effects of any Requires field in any + rule will apply to all other rules as well. + + - ToEndpoints, ToCIDR, ToCIDRSet, ToEntities, ToServices and ToGroups are + mutually exclusive. Only one of these members may be present within an + individual rule. + properties: + icmps: + description: |- + ICMPs is a list of ICMP rule identified by type number + which the endpoint subject to the rule is not allowed to connect to. + + Example: + Any endpoint with the label "app=httpd" is not allowed to initiate + type 8 ICMP connections. + items: + description: ICMPRule is a list of ICMP fields. + properties: + fields: + description: Fields is a list of ICMP fields. + items: + description: ICMPField is a ICMP field. + properties: + family: + default: IPv4 + description: |- + Family is a IP address version. + Currently, we support `IPv4` and `IPv6`. + `IPv4` is set as default. 
+ enum: + - IPv4 + - IPv6 + type: string + type: + anyOf: + - type: integer + - type: string + description: "Type is a ICMP-type.\nIt should + be an 8bit code (0-255), or it's CamelCase name + (for example, \"EchoReply\").\nAllowed ICMP + types are:\n Ipv4: EchoReply | DestinationUnreachable + | Redirect | Echo | EchoRequest |\n\t\t RouterAdvertisement + | RouterSelection | TimeExceeded | ParameterProblem + |\n\t\t\t Timestamp | TimestampReply | Photuris + | ExtendedEcho Request | ExtendedEcho Reply\n + \ Ipv6: DestinationUnreachable | PacketTooBig + | TimeExceeded | ParameterProblem |\n\t\t\t + EchoRequest | EchoReply | MulticastListenerQuery| + MulticastListenerReport |\n\t\t\t MulticastListenerDone + | RouterSolicitation | RouterAdvertisement | + NeighborSolicitation |\n\t\t\t NeighborAdvertisement + | RedirectMessage | RouterRenumbering | ICMPNodeInformationQuery + |\n\t\t\t ICMPNodeInformationResponse | InverseNeighborDiscoverySolicitation + | InverseNeighborDiscoveryAdvertisement |\n\t\t\t + HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply + | MobilePrefixSolicitation |\n\t\t\t MobilePrefixAdvertisement + | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix + |\n\t\t\t ExtendedEchoRequest | ExtendedEchoReply" + pattern: ^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho + 
Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$ + x-kubernetes-int-or-string: true + required: + - type + type: object + maxItems: 40 + type: array + type: object + type: array + toCIDR: + description: |- + ToCIDR is a list of IP blocks which the endpoint subject to the rule + is allowed to initiate connections. Only connections destined for + outside of the cluster and not targeting the host will be subject + to CIDR rules. This will match on the destination IP address of + outgoing connections. Adding a prefix into ToCIDR or into ToCIDRSet + with no ExcludeCIDRs is equivalent. Overlaps are allowed between + ToCIDR and ToCIDRSet. + + Example: + Any endpoint with the label "app=database-proxy" is allowed to + initiate connections to 10.2.3.0/24 + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + toCIDRSet: + description: |- + ToCIDRSet is a list of IP blocks which the endpoint subject to the rule + is allowed to initiate connections to in addition to connections + which are allowed via ToEndpoints, along with a list of subnets contained + within their corresponding IP block to which traffic should not be + allowed. This will match on the destination IP address of outgoing + connections. Adding a prefix into ToCIDR or into ToCIDRSet with no + ExcludeCIDRs is equivalent. Overlaps are allowed between ToCIDR and + ToCIDRSet. 
+ + Example: + Any endpoint with the label "app=database-proxy" is allowed to + initiate connections to 10.2.3.0/24 except from IPs in subnet 10.2.3.0/28. + items: + description: |- + CIDRRule is a rule that specifies a CIDR prefix to/from which outside + communication is allowed, along with an optional list of subnets within that + CIDR prefix to/from which outside communication is not allowed. + oneOf: + - properties: + cidr: {} + required: + - cidr + - properties: + cidrGroupRef: {} + required: + - cidrGroupRef + - properties: + cidrGroupSelector: {} + required: + - cidrGroupSelector + properties: + cidr: + description: CIDR is a CIDR prefix / IP Block. + format: cidr + type: string + cidrGroupRef: + description: |- + CIDRGroupRef is a reference to a CiliumCIDRGroup object. + A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to + the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + connections from. + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + cidrGroupSelector: + description: |- + CIDRGroupSelector selects CiliumCIDRGroups by their labels, + rather than by name. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + except: + description: |- + ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule + is not allowed to initiate connections to. These CIDR prefixes should be + contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not + supported yet. + These exceptions are only applied to the Cidr in this CIDRRule, and do not + apply to any other CIDR prefixes in any other CIDRRules. + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + type: object + type: array + toEndpoints: + description: |- + ToEndpoints is a list of endpoints identified by an EndpointSelector to + which the endpoints subject to the rule are allowed to communicate. + + Example: + Any endpoint with the label "role=frontend" can communicate with any + endpoint carrying the label "role=backend". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toEntities: + description: |- + ToEntities is a list of special entities to which the endpoint subject + to the rule is allowed to initiate connections. Supported entities are + `world`, `cluster`,`host`,`remote-node`,`kube-apiserver`, `init`, + `health`,`unmanaged` and `all`. + items: + description: |- + Entity specifies the class of receiver/sender endpoints that do not have + individual identities. Entities are used to describe "outside of cluster", + "host", etc. 
+ enum: + - all + - world + - cluster + - host + - init + - ingress + - unmanaged + - remote-node + - health + - none + - kube-apiserver + type: string + type: array + toGroups: + description: |- + ToGroups is a directive that allows the integration with multiple outside + providers. Currently, only AWS is supported, and the rule can select by + multiple sub directives: + + Example: + toGroups: + - aws: + securityGroupsIds: + - 'sg-XXXXXXXXXXXXX' + items: + description: |- + Groups structure to store all kinds of new integrations that needs a new + derivative policy. + properties: + aws: + description: AWSGroup is an structure that can be used + to whitelisting information from AWS integration + properties: + labels: + additionalProperties: + type: string + type: object + region: + type: string + securityGroupsIds: + items: + type: string + type: array + securityGroupsNames: + items: + type: string + type: array + type: object + type: object + type: array + toNodes: + description: |- + ToNodes is a list of nodes identified by an + EndpointSelector to which endpoints subject to the rule is allowed to communicate. + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toPorts: + description: |- + ToPorts is a list of destination ports identified by port number and + protocol which the endpoint subject to the rule is not allowed to connect + to. + + Example: + Any endpoint with the label "role=frontend" is not allowed to initiate + connections to destination port 8080/tcp + items: + description: |- + PortDenyRule is a list of ports/protocol that should be used for deny + policies. This structure lacks the L7Rules since it's not supported in deny + policies. + properties: + ports: + description: Ports is a list of L4 port/protocol + items: + description: PortProtocol specifies an L4 port with + an optional transport protocol + properties: + endPort: + description: EndPort can only be an L4 port number. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + description: |- + Port can be an L4 port number, or a name in the form of "http" + or "http-8080". 
+ pattern: ^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$ + type: string + protocol: + description: |- + Protocol is the L4 protocol. If omitted or empty, any protocol + matches. Accepted values: "TCP", "UDP", "SCTP", "ANY" + + Matching on ICMP is not supported. + + Named port specified for a container may narrow this down, but may not + contradict this. + enum: + - TCP + - UDP + - SCTP + - ANY + type: string + required: + - port + type: object + type: array + type: object + type: array + toRequires: + description: |- + ToRequires is a list of additional constraints which must be met + in order for the selected endpoints to be able to connect to other + endpoints. These additional constraints do no by itself grant access + privileges and must always be accompanied with at least one matching + ToEndpoints. + + Example: + Any Endpoint with the label "team=A" requires any endpoint to which it + communicates to also carry the label "team=A". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toServices: + description: |- + ToServices is a list of services to which the endpoint subject + to the rule is allowed to initiate connections. + Currently Cilium only supports toServices for K8s services. + items: + description: |- + Service selects policy targets that are bundled as part of a + logical load-balanced service. + + Currently only Kubernetes-based Services are supported. + properties: + k8sService: + description: K8sService selects service by name and + namespace pair + properties: + namespace: + type: string + serviceName: + type: string + type: object + k8sServiceSelector: + description: K8sServiceSelector selects services by + k8s labels and namespace + properties: + namespace: + type: string + selector: + description: ServiceSelector is a label selector + for k8s services + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the + value from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - selector + type: object + type: object + type: array + type: object + type: array + enableDefaultDeny: + description: |- + EnableDefaultDeny determines whether this policy configures the + subject endpoint(s) to have a default deny mode. If enabled, + this causes all traffic not explicitly allowed by a network policy + to be dropped. + + If not specified, the default is true for each traffic direction + that has rules, and false otherwise. For example, if a policy + only has Ingress or IngressDeny rules, then the default for + ingress is true and egress is false. + + If multiple policies apply to an endpoint, that endpoint's default deny + will be enabled if any policy requests it. 
+ + This is useful for creating broad-based network policies that will not + cause endpoints to enter default-deny mode. + properties: + egress: + description: |- + Whether or not the endpoint should have a default-deny rule applied + to egress traffic. + type: boolean + ingress: + description: |- + Whether or not the endpoint should have a default-deny rule applied + to ingress traffic. + type: boolean + type: object + endpointSelector: + description: |- + EndpointSelector selects all endpoints which should be subject to + this rule. EndpointSelector and NodeSelector cannot be both empty and + are mutually exclusive. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from the + MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + ingress: + description: |- + Ingress is a list of IngressRule which are enforced at ingress. + If omitted or empty, this rule does not apply at ingress. + items: + description: |- + IngressRule contains all rule types which can be applied at ingress, + i.e. network traffic that originates outside of the endpoint and + is entering the endpoint selected by the endpointSelector. + + - All members of this structure are optional. If omitted or empty, the + member will have no effect on the rule. + + - If multiple members are set, all of them need to match in order for + the rule to take effect. The exception to this rule is FromRequires field; + the effects of any Requires field in any rule will apply to all other + rules as well. + + - FromEndpoints, FromCIDR, FromCIDRSet and FromEntities are mutually + exclusive. Only one of these members may be present within an individual + rule. + properties: + authentication: + description: Authentication is the required authentication + type for the allowed traffic, if any. + properties: + mode: + description: Mode is the required authentication mode + for the allowed traffic, if any. + enum: + - disabled + - required + - test-always-fail + type: string + required: + - mode + type: object + fromCIDR: + description: |- + FromCIDR is a list of IP blocks which the endpoint subject to the + rule is allowed to receive connections from. Only connections which + do *not* originate from the cluster or from the local host are subject + to CIDR rules. In order to allow in-cluster connectivity, use the + FromEndpoints field. This will match on the source IP address of + incoming connections. 
Adding a prefix into FromCIDR or into + FromCIDRSet with no ExcludeCIDRs is equivalent. Overlaps are + allowed between FromCIDR and FromCIDRSet. + + Example: + Any endpoint with the label "app=my-legacy-pet" is allowed to receive + connections from 10.3.9.1 + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + fromCIDRSet: + description: |- + FromCIDRSet is a list of IP blocks which the endpoint subject to the + rule is allowed to receive connections from in addition to FromEndpoints, + along with a list of subnets contained within their corresponding IP block + from which traffic should not be allowed. + This will match on the source IP address of incoming connections. Adding + a prefix into FromCIDR or into FromCIDRSet with no ExcludeCIDRs is + equivalent. Overlaps are allowed between FromCIDR and FromCIDRSet. + + Example: + Any endpoint with the label "app=my-legacy-pet" is allowed to receive + connections from 10.0.0.0/8 except from IPs in subnet 10.96.0.0/12. + items: + description: |- + CIDRRule is a rule that specifies a CIDR prefix to/from which outside + communication is allowed, along with an optional list of subnets within that + CIDR prefix to/from which outside communication is not allowed. + oneOf: + - properties: + cidr: {} + required: + - cidr + - properties: + cidrGroupRef: {} + required: + - cidrGroupRef + - properties: + cidrGroupSelector: {} + required: + - cidrGroupSelector + properties: + cidr: + description: CIDR is a CIDR prefix / IP Block. + format: cidr + type: string + cidrGroupRef: + description: |- + CIDRGroupRef is a reference to a CiliumCIDRGroup object. + A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to + the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + connections from. 
+ maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + cidrGroupSelector: + description: |- + CIDRGroupSelector selects CiliumCIDRGroups by their labels, + rather than by name. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + except: + description: |- + ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule + is not allowed to initiate connections to. 
These CIDR prefixes should be + contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not + supported yet. + These exceptions are only applied to the Cidr in this CIDRRule, and do not + apply to any other CIDR prefixes in any other CIDRRules. + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + type: object + type: array + fromEndpoints: + description: |- + FromEndpoints is a list of endpoints identified by an + EndpointSelector which are allowed to communicate with the endpoint + subject to the rule. + + Example: + Any endpoint with the label "role=backend" can be consumed by any + endpoint carrying the label "role=frontend". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. 
+ maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + fromEntities: + description: |- + FromEntities is a list of special entities which the endpoint subject + to the rule is allowed to receive connections from. Supported entities are + `world`, `cluster` and `host` + items: + description: |- + Entity specifies the class of receiver/sender endpoints that do not have + individual identities. Entities are used to describe "outside of cluster", + "host", etc. + enum: + - all + - world + - cluster + - host + - init + - ingress + - unmanaged + - remote-node + - health + - none + - kube-apiserver + type: string + type: array + fromGroups: + description: |- + FromGroups is a directive that allows the integration with multiple outside + providers. Currently, only AWS is supported, and the rule can select by + multiple sub directives: + + Example: + FromGroups: + - aws: + securityGroupsIds: + - 'sg-XXXXXXXXXXXXX' + items: + description: |- + Groups structure to store all kinds of new integrations that needs a new + derivative policy. 
+ properties: + aws: + description: AWSGroup is an structure that can be used + to whitelisting information from AWS integration + properties: + labels: + additionalProperties: + type: string + type: object + region: + type: string + securityGroupsIds: + items: + type: string + type: array + securityGroupsNames: + items: + type: string + type: array + type: object + type: object + type: array + fromNodes: + description: |- + FromNodes is a list of nodes identified by an + EndpointSelector which are allowed to communicate with the endpoint + subject to the rule. + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + fromRequires: + description: |- + FromRequires is a list of additional constraints which must be met + in order for the selected endpoints to be reachable. These + additional constraints do no by itself grant access privileges and + must always be accompanied with at least one matching FromEndpoints. + + Example: + Any Endpoint with the label "team=A" requires consuming endpoint + to also carry the label "team=A". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. 
+ maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + icmps: + description: |- + ICMPs is a list of ICMP rule identified by type number + which the endpoint subject to the rule is allowed to + receive connections on. + + Example: + Any endpoint with the label "app=httpd" can only accept incoming + type 8 ICMP connections. + items: + description: ICMPRule is a list of ICMP fields. + properties: + fields: + description: Fields is a list of ICMP fields. + items: + description: ICMPField is a ICMP field. + properties: + family: + default: IPv4 + description: |- + Family is a IP address version. + Currently, we support `IPv4` and `IPv6`. + `IPv4` is set as default. 
+ enum: + - IPv4 + - IPv6 + type: string + type: + anyOf: + - type: integer + - type: string + description: "Type is a ICMP-type.\nIt should + be an 8bit code (0-255), or it's CamelCase name + (for example, \"EchoReply\").\nAllowed ICMP + types are:\n Ipv4: EchoReply | DestinationUnreachable + | Redirect | Echo | EchoRequest |\n\t\t RouterAdvertisement + | RouterSelection | TimeExceeded | ParameterProblem + |\n\t\t\t Timestamp | TimestampReply | Photuris + | ExtendedEcho Request | ExtendedEcho Reply\n + \ Ipv6: DestinationUnreachable | PacketTooBig + | TimeExceeded | ParameterProblem |\n\t\t\t + EchoRequest | EchoReply | MulticastListenerQuery| + MulticastListenerReport |\n\t\t\t MulticastListenerDone + | RouterSolicitation | RouterAdvertisement | + NeighborSolicitation |\n\t\t\t NeighborAdvertisement + | RedirectMessage | RouterRenumbering | ICMPNodeInformationQuery + |\n\t\t\t ICMPNodeInformationResponse | InverseNeighborDiscoverySolicitation + | InverseNeighborDiscoveryAdvertisement |\n\t\t\t + HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply + | MobilePrefixSolicitation |\n\t\t\t MobilePrefixAdvertisement + | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix + |\n\t\t\t ExtendedEchoRequest | ExtendedEchoReply" + pattern: ^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho + 
Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$ + x-kubernetes-int-or-string: true + required: + - type + type: object + maxItems: 40 + type: array + type: object + type: array + toPorts: + description: |- + ToPorts is a list of destination ports identified by port number and + protocol which the endpoint subject to the rule is allowed to + receive connections on. + + Example: + Any endpoint with the label "app=httpd" can only accept incoming + connections on port 80/tcp. + items: + description: |- + PortRule is a list of ports/protocol combinations with optional Layer 7 + rules which must be met. + properties: + listener: + description: |- + listener specifies the name of a custom Envoy listener to which this traffic should be + redirected to. + properties: + envoyConfig: + description: |- + EnvoyConfig is a reference to the CEC or CCEC resource in which + the listener is defined. + properties: + kind: + description: |- + Kind is the resource type being referred to. Defaults to CiliumEnvoyConfig or + CiliumClusterwideEnvoyConfig for CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy, + respectively. The only case this is currently explicitly needed is when referring to a + CiliumClusterwideEnvoyConfig from CiliumNetworkPolicy, as using a namespaced listener + from a cluster scoped policy is not allowed. 
+ enum: + - CiliumEnvoyConfig + - CiliumClusterwideEnvoyConfig + type: string + name: + description: |- + Name is the resource name of the CiliumEnvoyConfig or CiliumClusterwideEnvoyConfig where + the listener is defined in. + minLength: 1 + type: string + required: + - name + type: object + name: + description: Name is the name of the listener. + minLength: 1 + type: string + priority: + description: |- + Priority for this Listener that is used when multiple rules would apply different + listeners to a policy map entry. Behavior of this is implementation dependent. + maximum: 100 + minimum: 1 + type: integer + required: + - envoyConfig + - name + type: object + originatingTLS: + description: |- + OriginatingTLS is the TLS context for the connections originated by + the L7 proxy. For egress policy this specifies the client-side TLS + parameters for the upstream connection originating from the L7 proxy + to the remote destination. For ingress policy this specifies the + client-side TLS parameters for the connection from the L7 proxy to + the local endpoint. + properties: + certificate: + description: |- + Certificate is the file name or k8s secret item name for the certificate + chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the + item must exist. + type: string + privateKey: + description: |- + PrivateKey is the file name or k8s secret item name for the private key + matching the certificate chain. If omitted, 'tls.key' is assumed, if it + exists. If given, the item must exist. + type: string + secret: + description: |- + Secret is the secret that contains the certificates and private key for + the TLS context. + By default, Cilium will search in this secret for the following items: + - 'ca.crt' - Which represents the trusted CA to verify remote source. + - 'tls.crt' - Which represents the public key certificate. + - 'tls.key' - Which represents the private key matching the public key + certificate. 
+ properties: + name: + description: Name is the name of the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + trustedCA: + description: |- + TrustedCA is the file name or k8s secret item name for the trusted CA. + If omitted, 'ca.crt' is assumed, if it exists. If given, the item must + exist. + type: string + required: + - secret + type: object + ports: + description: Ports is a list of L4 port/protocol + items: + description: PortProtocol specifies an L4 port with + an optional transport protocol + properties: + endPort: + description: EndPort can only be an L4 port number. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + description: |- + Port can be an L4 port number, or a name in the form of "http" + or "http-8080". + pattern: ^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$ + type: string + protocol: + description: |- + Protocol is the L4 protocol. If omitted or empty, any protocol + matches. Accepted values: "TCP", "UDP", "SCTP", "ANY" + + Matching on ICMP is not supported. + + Named port specified for a container may narrow this down, but may not + contradict this. + enum: + - TCP + - UDP + - SCTP + - ANY + type: string + required: + - port + type: object + maxItems: 40 + type: array + rules: + description: |- + Rules is a list of additional port level rules which must be met in + order for the PortRule to allow the traffic. If omitted or empty, + no layer 7 rules are enforced. + oneOf: + - properties: + http: {} + required: + - http + - properties: + kafka: {} + required: + - kafka + - properties: + dns: {} + required: + - dns + - properties: + l7proto: {} + required: + - l7proto + properties: + dns: + description: DNS-specific rules. 
+ items: + description: PortRuleDNS is a list of allowed + DNS lookups. + oneOf: + - properties: + matchName: {} + required: + - matchName + - properties: + matchPattern: {} + required: + - matchPattern + properties: + matchName: + description: |- + MatchName matches literal DNS names. A trailing "." is automatically added + when missing. + maxLength: 255 + pattern: ^([-a-zA-Z0-9_]+[.]?)+$ + type: string + matchPattern: + description: |- + MatchPattern allows using wildcards to match DNS names. All wildcards are + case insensitive. The wildcards are: + - "*" matches 0 or more DNS valid characters, and may occur anywhere in + the pattern. As a special case a "*" as the leftmost character, without a + following "." matches all subdomains as well as the name to the right. + A trailing "." is automatically added when missing. + + Examples: + `*.cilium.io` matches subomains of cilium at that level + www.cilium.io and blog.cilium.io match, cilium.io and google.com do not + `*cilium.io` matches cilium.io and all subdomains ends with "cilium.io" + except those containing "." separator, subcilium.io and sub-cilium.io match, + www.cilium.io and blog.cilium.io does not + sub*.cilium.io matches subdomains of cilium where the subdomain component + begins with "sub" + sub.cilium.io and subdomain.cilium.io match, www.cilium.io, + blog.cilium.io, cilium.io and google.com do not + maxLength: 255 + pattern: ^([-a-zA-Z0-9_*]+[.]?)+$ + type: string + type: object + type: array + http: + description: HTTP specific rules. + items: + description: |- + PortRuleHTTP is a list of HTTP protocol constraints. All fields are + optional, if all fields are empty or missing, the rule does not have any + effect. + + All fields of this type are extended POSIX regex as defined by IEEE Std + 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) + matched against the path of an incoming request. 
Currently it can contain + characters disallowed from the conventional "path" part of a URL as defined + by RFC 3986. + properties: + headerMatches: + description: |- + HeaderMatches is a list of HTTP headers which must be + present and match against the given values. Mismatch field can be used + to specify what to do when there is no match. + items: + description: |- + HeaderMatch extends the HeaderValue for matching requirement of a + named header field against an immediate string, a secret value, or + a regex. If none of the optional fields is present, then the + header value is not matched, only presence of the header is enough. + properties: + mismatch: + description: |- + Mismatch identifies what to do in case there is no match. The default is + to drop the request. Otherwise the overall rule is still considered as + matching, but the mismatches are logged in the access log. + enum: + - LOG + - ADD + - DELETE + - REPLACE + type: string + name: + description: Name identifies the header. + minLength: 1 + type: string + secret: + description: |- + Secret refers to a secret that contains the value to be matched against. + The secret must only contain one entry. If the referred secret does not + exist, and there is no "Value" specified, the match will fail. + properties: + name: + description: Name is the name of + the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + value: + description: |- + Value matches the exact value of the header. Can be specified either + alone or together with "Secret"; will be used as the header value if the + secret can not be found in the latter case. + type: string + required: + - name + type: object + type: array + headers: + description: |- + Headers is a list of HTTP headers which must be present in the + request. 
If omitted or empty, requests are allowed regardless of + headers present. + items: + type: string + type: array + host: + description: |- + Host is an extended POSIX regex matched against the host header of a + request. Examples: + + - foo.bar.com will match the host fooXbar.com or foo-bar.com + - foo\.bar\.com will only match the host foo.bar.com + + If omitted or empty, the value of the host header is ignored. + format: idn-hostname + type: string + method: + description: |- + Method is an extended POSIX regex matched against the method of a + request, e.g. "GET", "POST", "PUT", "PATCH", "DELETE", ... + + If omitted or empty, all methods are allowed. + type: string + path: + description: |- + Path is an extended POSIX regex matched against the path of a + request. Currently it can contain characters disallowed from the + conventional "path" part of a URL as defined by RFC 3986. + + If omitted or empty, all paths are all allowed. + type: string + type: object + type: array + kafka: + description: Kafka-specific rules. + items: + description: |- + PortRule is a list of Kafka protocol constraints. All fields are + optional, if all fields are empty or missing, the rule will match all + Kafka messages. + properties: + apiKey: + description: |- + APIKey is a case-insensitive string matched against the key of a + request, e.g. "produce", "fetch", "createtopic", "deletetopic", et al + Reference: https://kafka.apache.org/protocol#protocol_api_keys + + If omitted or empty, and if Role is not specified, then all keys are allowed. + type: string + apiVersion: + description: |- + APIVersion is the version matched against the api version of the + Kafka message. If set, it has to be a string representing a positive + integer. + + If omitted or empty, all versions are allowed. + type: string + clientID: + description: |- + ClientID is the client identifier as provided in the request. 
+ + From Kafka protocol documentation: + This is a user supplied identifier for the client application. The + user can use any identifier they like and it will be used when + logging errors, monitoring aggregates, etc. For example, one might + want to monitor not just the requests per second overall, but the + number coming from each client application (each of which could + reside on multiple servers). This id acts as a logical grouping + across all requests from a particular client. + + If omitted or empty, all client identifiers are allowed. + type: string + role: + description: |- + Role is a case-insensitive string and describes a group of API keys + necessary to perform certain higher-level Kafka operations such as "produce" + or "consume". A Role automatically expands into all APIKeys required + to perform the specified higher-level operation. + + The following values are supported: + - "produce": Allow producing to the topics specified in the rule + - "consume": Allow consuming from the topics specified in the rule + + This field is incompatible with the APIKey field, i.e APIKey and Role + cannot both be specified in the same rule. + + If omitted or empty, and if APIKey is not specified, then all keys are + allowed. + enum: + - produce + - consume + type: string + topic: + description: |- + Topic is the topic name contained in the message. If a Kafka request + contains multiple topics, then all topics must be allowed or the + message will be rejected. + + This constraint is ignored if the matched request message type + doesn't contain any topic. Maximum size of Topic can be 249 + characters as per recent Kafka spec and allowed characters are + a-z, A-Z, 0-9, -, . and _. + + Older Kafka versions had longer topic lengths of 255, but in Kafka 0.10 + version the length was changed from 255 to 249. For compatibility + reasons we are using 255. + + If omitted or empty, all topics are allowed. 
+ maxLength: 255 + type: string + type: object + type: array + l7: + description: Key-value pair rules. + items: + additionalProperties: + type: string + description: |- + PortRuleL7 is a list of key-value pairs interpreted by a L7 protocol as + protocol constraints. All fields are optional, if all fields are empty or + missing, the rule does not have any effect. + type: object + type: array + l7proto: + description: Name of the L7 protocol for which the + Key-value pair rules apply. + type: string + type: object + serverNames: + description: |- + ServerNames is a list of allowed TLS SNI values. If not empty, then + TLS must be present and one of the provided SNIs must be indicated in the + TLS handshake. + items: + type: string + type: array + terminatingTLS: + description: |- + TerminatingTLS is the TLS context for the connection terminated by + the L7 proxy. For egress policy this specifies the server-side TLS + parameters to be applied on the connections originated from the local + endpoint and terminated by the L7 proxy. For ingress policy this specifies + the server-side TLS parameters to be applied on the connections + originated from a remote source and terminated by the L7 proxy. + properties: + certificate: + description: |- + Certificate is the file name or k8s secret item name for the certificate + chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the + item must exist. + type: string + privateKey: + description: |- + PrivateKey is the file name or k8s secret item name for the private key + matching the certificate chain. If omitted, 'tls.key' is assumed, if it + exists. If given, the item must exist. + type: string + secret: + description: |- + Secret is the secret that contains the certificates and private key for + the TLS context. + By default, Cilium will search in this secret for the following items: + - 'ca.crt' - Which represents the trusted CA to verify remote source. + - 'tls.crt' - Which represents the public key certificate. 
+ - 'tls.key' - Which represents the private key matching the public key + certificate. + properties: + name: + description: Name is the name of the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + trustedCA: + description: |- + TrustedCA is the file name or k8s secret item name for the trusted CA. + If omitted, 'ca.crt' is assumed, if it exists. If given, the item must + exist. + type: string + required: + - secret + type: object + type: object + type: array + type: object + type: array + ingressDeny: + description: |- + IngressDeny is a list of IngressDenyRule which are enforced at ingress. + Any rule inserted here will be denied regardless of the allowed ingress + rules in the 'ingress' field. + If omitted or empty, this rule does not apply at ingress. + items: + description: |- + IngressDenyRule contains all rule types which can be applied at ingress, + i.e. network traffic that originates outside of the endpoint and + is entering the endpoint selected by the endpointSelector. + + - All members of this structure are optional. If omitted or empty, the + member will have no effect on the rule. + + - If multiple members are set, all of them need to match in order for + the rule to take effect. The exception to this rule is FromRequires field; + the effects of any Requires field in any rule will apply to all other + rules as well. + + - FromEndpoints, FromCIDR, FromCIDRSet, FromGroups and FromEntities are mutually + exclusive. Only one of these members may be present within an individual + rule. + properties: + fromCIDR: + description: |- + FromCIDR is a list of IP blocks which the endpoint subject to the + rule is allowed to receive connections from. Only connections which + do *not* originate from the cluster or from the local host are subject + to CIDR rules. 
In order to allow in-cluster connectivity, use the + FromEndpoints field. This will match on the source IP address of + incoming connections. Adding a prefix into FromCIDR or into + FromCIDRSet with no ExcludeCIDRs is equivalent. Overlaps are + allowed between FromCIDR and FromCIDRSet. + + Example: + Any endpoint with the label "app=my-legacy-pet" is allowed to receive + connections from 10.3.9.1 + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + fromCIDRSet: + description: |- + FromCIDRSet is a list of IP blocks which the endpoint subject to the + rule is allowed to receive connections from in addition to FromEndpoints, + along with a list of subnets contained within their corresponding IP block + from which traffic should not be allowed. + This will match on the source IP address of incoming connections. Adding + a prefix into FromCIDR or into FromCIDRSet with no ExcludeCIDRs is + equivalent. Overlaps are allowed between FromCIDR and FromCIDRSet. + + Example: + Any endpoint with the label "app=my-legacy-pet" is allowed to receive + connections from 10.0.0.0/8 except from IPs in subnet 10.96.0.0/12. + items: + description: |- + CIDRRule is a rule that specifies a CIDR prefix to/from which outside + communication is allowed, along with an optional list of subnets within that + CIDR prefix to/from which outside communication is not allowed. + oneOf: + - properties: + cidr: {} + required: + - cidr + - properties: + cidrGroupRef: {} + required: + - cidrGroupRef + - properties: + cidrGroupSelector: {} + required: + - cidrGroupSelector + properties: + cidr: + description: CIDR is a CIDR prefix / IP Block. + format: cidr + type: string + cidrGroupRef: + description: |- + CIDRGroupRef is a reference to a CiliumCIDRGroup object. 
+ A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to + the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + connections from. + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + cidrGroupSelector: + description: |- + CIDRGroupSelector selects CiliumCIDRGroups by their labels, + rather than by name. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + except: + description: |- + ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule + is not allowed to initiate connections to. These CIDR prefixes should be + contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not + supported yet. + These exceptions are only applied to the Cidr in this CIDRRule, and do not + apply to any other CIDR prefixes in any other CIDRRules. + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + type: object + type: array + fromEndpoints: + description: |- + FromEndpoints is a list of endpoints identified by an + EndpointSelector which are allowed to communicate with the endpoint + subject to the rule. + + Example: + Any endpoint with the label "role=backend" can be consumed by any + endpoint carrying the label "role=frontend". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + fromEntities: + description: |- + FromEntities is a list of special entities which the endpoint subject + to the rule is allowed to receive connections from. Supported entities are + `world`, `cluster` and `host` + items: + description: |- + Entity specifies the class of receiver/sender endpoints that do not have + individual identities. Entities are used to describe "outside of cluster", + "host", etc. + enum: + - all + - world + - cluster + - host + - init + - ingress + - unmanaged + - remote-node + - health + - none + - kube-apiserver + type: string + type: array + fromGroups: + description: |- + FromGroups is a directive that allows the integration with multiple outside + providers. Currently, only AWS is supported, and the rule can select by + multiple sub directives: + + Example: + FromGroups: + - aws: + securityGroupsIds: + - 'sg-XXXXXXXXXXXXX' + items: + description: |- + Groups structure to store all kinds of new integrations that needs a new + derivative policy. 
+ properties: + aws: + description: AWSGroup is an structure that can be used + to whitelisting information from AWS integration + properties: + labels: + additionalProperties: + type: string + type: object + region: + type: string + securityGroupsIds: + items: + type: string + type: array + securityGroupsNames: + items: + type: string + type: array + type: object + type: object + type: array + fromNodes: + description: |- + FromNodes is a list of nodes identified by an + EndpointSelector which are allowed to communicate with the endpoint + subject to the rule. + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + fromRequires: + description: |- + FromRequires is a list of additional constraints which must be met + in order for the selected endpoints to be reachable. These + additional constraints do no by itself grant access privileges and + must always be accompanied with at least one matching FromEndpoints. + + Example: + Any Endpoint with the label "team=A" requires consuming endpoint + to also carry the label "team=A". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. 
+                                maxLength: 63
+                                pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$
+                                type: string
+                              description: |-
+                                matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                operator is "In", and the values array contains only "value". The requirements are ANDed.
+                              type: object
+                            type: object
+                            x-kubernetes-map-type: atomic
+                          type: array
+                        icmps:
+                          description: |-
+                            ICMPs is a list of ICMP rules identified by type number
+                            which the endpoint subject to the rule is not allowed to
+                            receive connections on.
+
+                            Example:
+                            Any endpoint with the label "app=httpd" can not accept incoming
+                            type 8 ICMP connections.
+                          items:
+                            description: ICMPRule is a list of ICMP fields.
+                            properties:
+                              fields:
+                                description: Fields is a list of ICMP fields.
+                                items:
+                                  description: ICMPField is an ICMP field.
+                                  properties:
+                                    family:
+                                      default: IPv4
+                                      description: |-
+                                        Family is an IP address version.
+                                        Currently, we support `IPv4` and `IPv6`.
+                                        `IPv4` is set as default.
+ enum: + - IPv4 + - IPv6 + type: string + type: + anyOf: + - type: integer + - type: string + description: "Type is a ICMP-type.\nIt should + be an 8bit code (0-255), or it's CamelCase name + (for example, \"EchoReply\").\nAllowed ICMP + types are:\n Ipv4: EchoReply | DestinationUnreachable + | Redirect | Echo | EchoRequest |\n\t\t RouterAdvertisement + | RouterSelection | TimeExceeded | ParameterProblem + |\n\t\t\t Timestamp | TimestampReply | Photuris + | ExtendedEcho Request | ExtendedEcho Reply\n + \ Ipv6: DestinationUnreachable | PacketTooBig + | TimeExceeded | ParameterProblem |\n\t\t\t + EchoRequest | EchoReply | MulticastListenerQuery| + MulticastListenerReport |\n\t\t\t MulticastListenerDone + | RouterSolicitation | RouterAdvertisement | + NeighborSolicitation |\n\t\t\t NeighborAdvertisement + | RedirectMessage | RouterRenumbering | ICMPNodeInformationQuery + |\n\t\t\t ICMPNodeInformationResponse | InverseNeighborDiscoverySolicitation + | InverseNeighborDiscoveryAdvertisement |\n\t\t\t + HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply + | MobilePrefixSolicitation |\n\t\t\t MobilePrefixAdvertisement + | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix + |\n\t\t\t ExtendedEchoRequest | ExtendedEchoReply" + pattern: ^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho + 
Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$ + x-kubernetes-int-or-string: true + required: + - type + type: object + maxItems: 40 + type: array + type: object + type: array + toPorts: + description: |- + ToPorts is a list of destination ports identified by port number and + protocol which the endpoint subject to the rule is not allowed to + receive connections on. + + Example: + Any endpoint with the label "app=httpd" can not accept incoming + connections on port 80/tcp. + items: + description: |- + PortDenyRule is a list of ports/protocol that should be used for deny + policies. This structure lacks the L7Rules since it's not supported in deny + policies. + properties: + ports: + description: Ports is a list of L4 port/protocol + items: + description: PortProtocol specifies an L4 port with + an optional transport protocol + properties: + endPort: + description: EndPort can only be an L4 port number. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + description: |- + Port can be an L4 port number, or a name in the form of "http" + or "http-8080". + pattern: ^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$ + type: string + protocol: + description: |- + Protocol is the L4 protocol. If omitted or empty, any protocol + matches. Accepted values: "TCP", "UDP", "SCTP", "ANY" + + Matching on ICMP is not supported. 
+ + Named port specified for a container may narrow this down, but may not + contradict this. + enum: + - TCP + - UDP + - SCTP + - ANY + type: string + required: + - port + type: object + type: array + type: object + type: array + type: object + type: array + labels: + description: |- + Labels is a list of optional strings which can be used to + re-identify the rule or to store metadata. It is possible to lookup + or delete strings based on labels. Labels are not required to be + unique, multiple rules can have overlapping or identical labels. + items: + description: Label is the Cilium's representation of a container + label. + properties: + key: + type: string + source: + description: 'Source can be one of the above values (e.g.: + LabelSourceContainer).' + type: string + value: + type: string + required: + - key + type: object + type: array + nodeSelector: + description: |- + NodeSelector selects all nodes which should be subject to this rule. + EndpointSelector and NodeSelector cannot be both empty and are mutually + exclusive. Can only be used in CiliumClusterwideNetworkPolicies. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from the + MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + type: array + status: + description: |- + Status is the status of the Cilium policy rule. + + The reason this field exists in this structure is due a bug in the k8s + code-generator that doesn't create a `UpdateStatus` method because the + field does not exist in the structure. + properties: + conditions: + items: + properties: + lastTransitionTime: + description: The last time the condition transitioned from one + status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: The status of the condition, one of True, False, + or Unknown + type: string + type: + description: The type of the policy condition + type: string + required: + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + derivativePolicies: + additionalProperties: + description: |- + CiliumNetworkPolicyNodeStatus is the status of a Cilium policy rule for a + specific node. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations corresponds to the Annotations in the ObjectMeta of the CNP + that have been realized on the node for CNP. That is, if a CNP has been + imported and has been assigned annotation X=Y by the user, + Annotations in CiliumNetworkPolicyNodeStatus will be X=Y once the + CNP that was imported corresponding to Annotation X=Y has been realized on + the node. + type: object + enforcing: + description: |- + Enforcing is set to true once all endpoints present at the time the + policy has been imported are enforcing this policy. + type: boolean + error: + description: |- + Error describes any error that occurred when parsing or importing the + policy, or realizing the policy for the endpoints to which it applies + on the node. + type: string + lastUpdated: + description: LastUpdated contains the last time this status + was updated + format: date-time + type: string + localPolicyRevision: + description: |- + Revision is the policy revision of the repository which first implemented + this policy. + format: int64 + type: integer + ok: + description: |- + OK is true when the policy has been parsed and imported successfully + into the in-memory policy repository on the node. 
+ type: boolean + type: object + description: |- + DerivativePolicies is the status of all policies derived from the Cilium + policy + type: object + type: object + required: + - metadata + type: object + served: true + storage: true + subresources: + status: {} diff --git a/test/integration/manifests/cilium/v1.17/ebpf/overlay/cilium.yaml b/test/integration/manifests/cilium/v1.17/ebpf/overlay/cilium.yaml new file mode 100644 index 0000000000..2d80205e3d --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/ebpf/overlay/cilium.yaml @@ -0,0 +1,538 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/actually-managed-by: Eno + app.kubernetes.io/managed-by: Helm + k8s-app: cilium + kubernetes.azure.com/managedby: aks + name: cilium + namespace: kube-system +spec: + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: cilium + template: + metadata: + annotations: + prometheus.io/port: "9962" + prometheus.io/scrape: "true" + labels: + app.kubernetes.io/managed-by: Eno + k8s-app: cilium + kubernetes.azure.com/ebpf-dataplane: cilium + kubernetes.azure.com/managedby: aks + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.azure.com/cluster + operator: Exists + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: kubernetes.io/os + operator: In + values: + - linux + automountServiceAccountToken: true + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + - name: KUBE_CLIENT_BACKOFF_BASE + value: "1" + - name: 
KUBE_CLIENT_BACKOFF_DURATION + value: "120" + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 10 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + - name: require-k8s-connectivity + value: "false" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + name: cilium-agent + ports: + - containerPort: 9962 + hostPort: 9962 + name: prometheus + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + startupProbe: + failureThreshold: 105 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/proc/sys/net + name: host-proc-sys-net + - mountPath: /host/proc/sys/kernel + name: host-proc-sys-kernel + - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - 
mountPath: /var/run/cilium/netns + mountPropagation: HostToContainer + name: cilium-netns + - mountPath: /flowlog-config + name: hubble-flowlog-config + readOnly: true + - mountPath: /var/log/acns/hubble + name: networkflowlogs + - mountPath: /etc/config + name: azure-ip-masq-dir + readOnly: true + - command: + - /azure-iptables-monitor + - -v + - "3" + - -events=true + - -checkMap=true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: $AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY/azure-iptables-monitor:$AZURE_IPTABLES_MONITOR_TAG + imagePullPolicy: IfNotPresent + name: azure-iptables-monitor + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/config + name: iptables-config + - mountPath: /azure-block-iptables-bpf-map + name: iptables-block-bpf-map + readOnly: true + - command: + - ./azure-ip-masq-merger + - -v + - "2" + image: $AZURE_IP_MASQ_MERGER_IMAGE_REGISTRY/azure-ip-masq-merger:$AZURE_IP_MASQ_MERGER_TAG + imagePullPolicy: IfNotPresent + name: azure-ip-masq-merger + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/config/ + name: azure-ip-masq-agent-config-volume + - mountPath: /etc/merged-config/ + name: azure-ip-masq-dir + dnsPolicy: ClusterFirst + hostNetwork: true + initContainers: + - command: + - /azure-block-iptables + - -mode=attach + - -overwrite=true + image: $AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY/azure-iptables-monitor:$AZURE_IPTABLES_MONITOR_TAG + imagePullPolicy: IfNotPresent + name: iptables-blocker-init + resources: {} + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /proc + name: hostproc + - command: + - /azure-iptables-monitor + - -v + - 
"3" + - -events=true + - -terminateOnSuccess=true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: $AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY/azure-iptables-monitor:$AZURE_IPTABLES_MONITOR_TAG + imagePullPolicy: IfNotPresent + name: azure-iptables-monitor-init + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/config + name: iptables-config + - command: + - /install-plugin.sh + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: {} + securityContext: + capabilities: + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-cgroup + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + 
env: + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - args: + - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf + command: + - /bin/bash + - -c + - -- + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-bpf-fs + resources: {} + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + name: bpf-maps + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: clean-cilium-state + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + mountPropagation: HostToContainer + name: cilium-cgroup + - mountPath: /var/run/cilium + 
name: cilium-run + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /etc/systemd + type: DirectoryOrCreate + name: host-etc-systemd + - hostPath: + path: /lib/systemd + type: DirectoryOrCreate + name: host-lib-systemd + - hostPath: + path: /usr/lib + type: DirectoryOrCreate + name: host-usr-lib + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - name: clustermesh-secrets + secret: + defaultMode: 256 + optional: true + secretName: cilium-clustermesh + - configMap: + defaultMode: 420 + name: cilium-config + name: cilium-config-path + - hostPath: + path: /proc/sys/net + type: Directory + name: host-proc-sys-net + - hostPath: + path: /proc/sys/kernel + type: Directory + name: host-proc-sys-kernel + - hostPath: + path: /var/run/netns + type: DirectoryOrCreate + name: cilium-netns + - configMap: + defaultMode: 420 + name: acns-flowlog-config + optional: true + name: hubble-flowlog-config + - hostPath: + path: /var/log/acns/hubble + type: DirectoryOrCreate + name: networkflowlogs + - configMap: + defaultMode: 420 + name: allowed-iptables-patterns + optional: true + name: 
iptables-config + - hostPath: + path: /sys/fs/bpf/azure-block-iptables + type: DirectoryOrCreate + name: iptables-block-bpf-map + - emptyDir: {} + name: azure-ip-masq-dir + - name: azure-ip-masq-agent-config-volume + projected: + defaultMode: 420 + sources: + - configMap: + items: + - key: ip-masq-agent + mode: 444 + path: ip-masq-agent + name: azure-ip-masq-agent-config + optional: true + - configMap: + items: + - key: ip-masq-agent-reconciled + mode: 444 + path: ip-masq-agent-reconciled + name: azure-ip-masq-agent-config-reconciled + optional: true + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 5% + type: RollingUpdate diff --git a/test/integration/manifests/cilium/v1.17/ebpf/overlay/static/azure-ip-masq-agent-config-reconciled.yaml b/test/integration/manifests/cilium/v1.17/ebpf/overlay/static/azure-ip-masq-agent-config-reconciled.yaml new file mode 100644 index 0000000000..e6d8edca6a --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/ebpf/overlay/static/azure-ip-masq-agent-config-reconciled.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +data: + ip-masq-agent-reconciled: | + MasqLinkLocal: true + NonMasqueradeCIDRs: + - 192.168.0.0/16 +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/managed-by: Eno + component: ip-masq-agent + name: azure-ip-masq-agent-config-reconciled + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.17/ebpf/overlay/static/cilium-config.yaml b/test/integration/manifests/cilium/v1.17/ebpf/overlay/static/cilium-config.yaml new file mode 100644 index 0000000000..c0d38cc82d --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/ebpf/overlay/static/cilium-config.yaml @@ -0,0 +1,166 @@ +apiVersion: v1 +data: + agent-not-ready-taint-key: node.cilium.io/agent-not-ready + arping-refresh-period: 30s + auto-direct-node-routes: "false" + bpf-algorithm-annotation: "false" + bpf-events-drop-enabled: "true" + bpf-events-policy-verdict-enabled: "true" + bpf-events-trace-enabled: "true" + 
bpf-lb-acceleration: disabled + bpf-lb-external-clusterip: "false" + bpf-lb-map-max: "65536" + bpf-lb-mode: snat + bpf-lb-mode-annotation: "false" + bpf-lb-sock: "false" + bpf-lb-sock-hostns-only: "true" + bpf-lb-sock-terminate-pod-connections: "false" + bpf-lb-source-range-all-types: "false" + bpf-map-dynamic-size-ratio: "0.0025" + bpf-policy-map-max: "16384" + bpf-root: /sys/fs/bpf + ces-slice-mode: fcfs + cgroup-root: /run/cilium/cgroupv2 + cilium-endpoint-gc-interval: 5m0s + cluster-id: "0" + cluster-name: alewoverebpfcilcanary + cni-exclusive: "false" + cni-log-file: /var/run/cilium/cilium-cni.log + datapath-mode: veth + debug: "false" + direct-routing-skip-unreachable: "false" + disable-cnp-status-updates: "true" + disable-embedded-dns-proxy: "false" + disable-endpoint-crd: "false" + dnsproxy-enable-transparent-mode: "false" + egress-gateway-reconciliation-trigger-interval: 1s + enable-auto-protect-node-port-range: "true" + enable-bgp-control-plane: "false" + enable-bpf-clock-probe: "true" + enable-bpf-masquerade: "true" + enable-cilium-endpoint-slice: "true" + enable-endpoint-health-checking: "false" + enable-endpoint-lockdown-on-policy-overflow: "false" + enable-endpoint-routes: "true" + enable-experimental-lb: "false" + enable-health-check-loadbalancer-ip: "false" + enable-health-check-nodeport: "true" + enable-health-checking: "true" + enable-host-legacy-routing: "false" + enable-hubble: "true" + enable-hubble-open-metrics: "false" + enable-internal-traffic-policy: "true" + enable-ip-masq-agent: "true" + enable-ipv4: "true" + enable-ipv4-big-tcp: "false" + enable-ipv4-masquerade: "true" + enable-ipv6: "false" + enable-ipv6-big-tcp: "false" + enable-ipv6-masquerade: "false" + enable-k8s-networkpolicy: "true" + enable-k8s-terminating-endpoint: "true" + enable-l2-neigh-discovery: "true" + enable-l7-proxy: "true" + enable-lb-ipam: "false" + enable-local-node-route: "false" + enable-local-redirect-policy: "true" + enable-masquerade-to-route-source: "false" + 
enable-metrics: "true" + enable-node-selector-labels: "false" + enable-non-default-deny-policies: "true" + enable-policy: default + enable-remote-node-identity: "true" + enable-remote-node-masquerade: "true" + enable-runtime-device-detection: "false" + enable-sctp: "false" + enable-session-affinity: "true" + enable-source-ip-verification: "false" + enable-standalone-dns-proxy: "true" + enable-svc-source-range-check: "true" + enable-tcx: "false" + enable-vtep: "false" + enable-well-known-identities: "false" + enable-wireguard: "false" + enable-xt-socket-fallback: "true" + external-envoy-proxy: "false" + health-check-icmp-failure-threshold: "3" + hubble-disable-tls: "false" + hubble-event-buffer-capacity: "4095" + hubble-export-file-max-backups: "5" + hubble-export-file-max-size-mb: "10" + hubble-flowlogs-config-path: /flowlog-config/flowlogs.yaml + hubble-listen-address: :4244 + hubble-metrics: flow:sourceEgressContext=pod;destinationIngressContext=pod tcp:sourceEgressContext=pod;destinationIngressContext=pod + drop:sourceEgressContext=pod;destinationIngressContext=pod dns:sourceEgressContext=pod;destinationIngressContext=pod + hubble-metrics-server: :9965 + hubble-socket-path: /var/run/cilium/hubble.sock + hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt + hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt + hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key + identity-allocation-mode: crd + install-iptables-rules: "true" + install-no-conntrack-iptables-rules: "false" + ipam: delegated-plugin + ipam-cilium-node-update-rate: 15s + ipv4-native-routing-cidr: 192.168.0.0/16 + k8s-client-burst: "20" + k8s-client-qps: "10" + k8s-require-ipv4-pod-cidr: "false" + k8s-require-ipv6-pod-cidr: "false" + kube-proxy-replacement: "true" + kube-proxy-replacement-healthz-bind-address: 0.0.0.0:10256 + local-router-ipv4: 169.254.23.0 + mesh-auth-enabled: "false" + mesh-auth-gc-interval: 5m0s + mesh-auth-queue-size: "1024" + 
mesh-auth-rotated-identities-queue-size: "1024" + metrics: +cilium_bpf_map_pressure +cilium_proxy_datapath_update_timeout_total + monitor-aggregation: medium + monitor-aggregation-flags: all + monitor-aggregation-interval: 5s + nat-map-stats-entries: "32" + nat-map-stats-interval: 30s + node-port-bind-protection: "true" + nodeport-addresses: "" + nodes-gc-interval: 5m0s + operator-api-serve-addr: 127.0.0.1:9234 + operator-prometheus-serve-addr: :9963 + preallocate-bpf-maps: "false" + procfs: /host/proc + prometheus-serve-addr: :9962 + proxy-connect-timeout: "2" + proxy-idle-timeout-seconds: "60" + proxy-max-connection-duration-seconds: "0" + proxy-max-requests-per-connection: "0" + proxy-xff-num-trusted-hops-egress: "0" + proxy-xff-num-trusted-hops-ingress: "0" + remove-cilium-node-taints: "true" + routing-mode: native + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" + sidecar-istio-proxy-image: cilium/istio_proxy + synchronize-k8s-nodes: "true" + tofqdns-dns-reject-response-code: refused + tofqdns-enable-dns-compression: "true" + tofqdns-endpoint-max-ip-per-hostname: "1000" + tofqdns-idle-connection-grace-period: 0s + tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "3600" + tofqdns-proxy-port: "40046" + tofqdns-proxy-response-max-delay: 100ms + tofqdns-server-port: "40045" + unmanaged-pod-watcher-interval: "0" + vtep-cidr: "" + vtep-endpoint: "" + vtep-mac: "" + vtep-mask: "" +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/actually-managed-by: Eno + app.kubernetes.io/managed-by: Helm + name: cilium-config + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.17/ebpf/podsubnet/cilium.yaml b/test/integration/manifests/cilium/v1.17/ebpf/podsubnet/cilium.yaml new file mode 100644 index 0000000000..3e3054f41e --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/ebpf/podsubnet/cilium.yaml @@ -0,0 
+1,508 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/actually-managed-by: Eno + app.kubernetes.io/managed-by: Helm + k8s-app: cilium + kubernetes.azure.com/managedby: aks + name: cilium + namespace: kube-system +spec: + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: cilium + template: + metadata: + annotations: + prometheus.io/port: "9962" + prometheus.io/scrape: "true" + labels: + app.kubernetes.io/managed-by: Eno + k8s-app: cilium + kubernetes.azure.com/ebpf-dataplane: cilium + kubernetes.azure.com/managedby: aks + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.azure.com/cluster + operator: Exists + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: kubernetes.io/os + operator: In + values: + - linux + automountServiceAccountToken: true + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + - name: KUBE_CLIENT_BACKOFF_BASE + value: "1" + - name: KUBE_CLIENT_BACKOFF_DURATION + value: "120" + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 10 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + - name: require-k8s-connectivity + value: "false" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + name: cilium-agent + ports: + - containerPort: 9962 + hostPort: 9962 + name: prometheus + protocol: TCP + readinessProbe: + 
failureThreshold: 3 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + startupProbe: + failureThreshold: 105 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/proc/sys/net + name: host-proc-sys-net + - mountPath: /host/proc/sys/kernel + name: host-proc-sys-kernel + - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /var/run/cilium/netns + mountPropagation: HostToContainer + name: cilium-netns + - mountPath: /flowlog-config + name: hubble-flowlog-config + readOnly: true + - mountPath: /var/log/acns/hubble + name: networkflowlogs + - mountPath: /etc/config + name: azure-ip-masq-dir + readOnly: true + - command: + - /azure-iptables-monitor + - -v + - "3" + - -events=true + - -checkMap=true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: 
$AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY/azure-iptables-monitor:$AZURE_IPTABLES_MONITOR_TAG + imagePullPolicy: IfNotPresent + name: azure-iptables-monitor + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/config + name: iptables-config + - mountPath: /azure-block-iptables-bpf-map + name: iptables-block-bpf-map + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + initContainers: + - command: + - /azure-block-iptables + - -mode=attach + - -overwrite=true + image: $AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY/azure-iptables-monitor:$AZURE_IPTABLES_MONITOR_TAG + imagePullPolicy: IfNotPresent + name: iptables-blocker-init + resources: {} + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /proc + name: hostproc + - command: + - /azure-iptables-monitor + - -v + - "3" + - -events=true + - -terminateOnSuccess=true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: $AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY/azure-iptables-monitor:$AZURE_IPTABLES_MONITOR_TAG + imagePullPolicy: IfNotPresent + name: azure-iptables-monitor-init + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/config + name: iptables-config + - command: + - /install-plugin.sh + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: {} + securityContext: + capabilities: + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: 
/host/opt/cni/bin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-cgroup + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - args: + - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf + command: + - /bin/bash + - -c + - -- + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-bpf-fs + resources: {} + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + 
terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + name: bpf-maps + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: clean-cilium-state + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + mountPropagation: HostToContainer + name: cilium-cgroup + - mountPath: /var/run/cilium + name: cilium-run + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /etc/systemd + type: DirectoryOrCreate + name: host-etc-systemd + - hostPath: + path: /lib/systemd + type: DirectoryOrCreate + name: host-lib-systemd + - hostPath: + path: /usr/lib + type: DirectoryOrCreate + name: host-usr-lib + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: 
DirectoryOrCreate + name: cilium-cgroup + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - name: clustermesh-secrets + secret: + defaultMode: 256 + optional: true + secretName: cilium-clustermesh + - configMap: + defaultMode: 420 + name: cilium-config + name: cilium-config-path + - hostPath: + path: /proc/sys/net + type: Directory + name: host-proc-sys-net + - hostPath: + path: /proc/sys/kernel + type: Directory + name: host-proc-sys-kernel + - hostPath: + path: /var/run/netns + type: DirectoryOrCreate + name: cilium-netns + - configMap: + defaultMode: 420 + name: acns-flowlog-config + optional: true + name: hubble-flowlog-config + - hostPath: + path: /var/log/acns/hubble + type: DirectoryOrCreate + name: networkflowlogs + - configMap: + defaultMode: 420 + name: allowed-iptables-patterns + optional: true + name: iptables-config + - hostPath: + path: /sys/fs/bpf/azure-block-iptables + type: DirectoryOrCreate + name: iptables-block-bpf-map + - configMap: + defaultMode: 420 + name: azure-dns-imds-ip-masq-agent-config + optional: true + name: azure-ip-masq-dir + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 5% + type: RollingUpdate diff --git a/test/integration/manifests/cilium/v1.17/ebpf/podsubnet/static/azure-dns-imds-ip-masq-agent-config.yaml b/test/integration/manifests/cilium/v1.17/ebpf/podsubnet/static/azure-dns-imds-ip-masq-agent-config.yaml new file mode 100644 index 0000000000..2613c3b265 --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/ebpf/podsubnet/static/azure-dns-imds-ip-masq-agent-config.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +data: + ip-masq-agent: "nonMasqueradeCIDRs:\n- 0.0.0.0/1\n- 128.0.0.0/3\n- 160.0.0.0/5\n- + 168.0.0.0/11\n- 168.32.0.0/12\n- 168.48.0.0/13\n- 
168.56.0.0/14\n- 168.60.0.0/15\n- + 168.62.0.0/16\n- 168.63.0.0/17\n- 168.63.128.0/24\n- 168.63.129.0/29\n- 168.63.129.8/30\n- + 168.63.129.12/30\n- 168.63.129.17/32\n- 168.63.129.18/31\n- 168.63.129.20/30\n- + 168.63.129.24/29\n- 168.63.129.32/27\n- 168.63.129.64/26\n- 168.63.129.128/25\n- + 168.63.130.0/23\n- 168.63.132.0/22\n- 168.63.136.0/21\n- 168.63.144.0/20\n- 168.63.160.0/19\n- + 168.63.192.0/18\n- 168.64.0.0/10\n- 168.128.0.0/9\n- 169.0.0.0/9\n- 169.128.0.0/10\n- + 169.192.0.0/11\n- 169.224.0.0/12\n- 169.240.0.0/13\n- 169.248.0.0/14\n- 169.252.0.0/15\n- + 169.254.0.0/17\n- 169.254.128.0/19\n- 169.254.160.0/21\n- 169.254.168.0/24\n- + 169.254.169.0/25\n- 169.254.169.128/26\n- 169.254.169.192/27\n- 169.254.169.224/28\n- + 169.254.169.240/29\n- 169.254.169.248/30\n- 169.254.169.252/31\n- 169.254.169.255/32\n- + 169.254.170.0/23\n- 169.254.172.0/22\n- 169.254.176.0/20\n- 169.254.192.0/18\n- + 169.255.0.0/16\n- 170.0.0.0/7\n- 172.0.0.0/6\n- 176.0.0.0/4\n- 192.0.0.0/3\n- + 224.0.0.0/3\n \nmasqLinkLocal: true\nmasqLinkLocalIPv6: true\n" +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/actually-managed-by: Eno + app.kubernetes.io/managed-by: Helm + name: azure-dns-imds-ip-masq-agent-config + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.17/ebpf/podsubnet/static/cilium-config.yaml b/test/integration/manifests/cilium/v1.17/ebpf/podsubnet/static/cilium-config.yaml new file mode 100644 index 0000000000..14cb5d0105 --- /dev/null +++ b/test/integration/manifests/cilium/v1.17/ebpf/podsubnet/static/cilium-config.yaml @@ -0,0 +1,166 @@ +apiVersion: v1 +data: + agent-not-ready-taint-key: node.cilium.io/agent-not-ready + arping-refresh-period: 30s + auto-direct-node-routes: "false" + bpf-algorithm-annotation: "false" + bpf-events-drop-enabled: "true" + bpf-events-policy-verdict-enabled: "true" + bpf-events-trace-enabled: "true" + 
bpf-lb-acceleration: disabled + bpf-lb-external-clusterip: "false" + bpf-lb-map-max: "65536" + bpf-lb-mode: snat + bpf-lb-mode-annotation: "false" + bpf-lb-sock: "false" + bpf-lb-sock-hostns-only: "true" + bpf-lb-sock-terminate-pod-connections: "false" + bpf-lb-source-range-all-types: "false" + bpf-map-dynamic-size-ratio: "0.0025" + bpf-policy-map-max: "16384" + bpf-root: /sys/fs/bpf + ces-slice-mode: fcfs + cgroup-root: /run/cilium/cgroupv2 + cilium-endpoint-gc-interval: 5m0s + cluster-id: "0" + cluster-name: alewpodsubebpfcilcanary + cni-exclusive: "false" + cni-log-file: /var/run/cilium/cilium-cni.log + datapath-mode: veth + debug: "false" + direct-routing-skip-unreachable: "false" + disable-cnp-status-updates: "true" + disable-embedded-dns-proxy: "false" + disable-endpoint-crd: "false" + dnsproxy-enable-transparent-mode: "false" + egress-gateway-reconciliation-trigger-interval: 1s + enable-auto-protect-node-port-range: "true" + enable-bgp-control-plane: "false" + enable-bpf-clock-probe: "true" + enable-bpf-masquerade: "true" + enable-cilium-endpoint-slice: "true" + enable-endpoint-health-checking: "false" + enable-endpoint-lockdown-on-policy-overflow: "false" + enable-endpoint-routes: "true" + enable-experimental-lb: "false" + enable-health-check-loadbalancer-ip: "false" + enable-health-check-nodeport: "true" + enable-health-checking: "true" + enable-host-legacy-routing: "false" + enable-hubble: "true" + enable-hubble-open-metrics: "false" + enable-internal-traffic-policy: "true" + enable-ip-masq-agent: "true" + enable-ipv4: "true" + enable-ipv4-big-tcp: "false" + enable-ipv4-masquerade: "true" + enable-ipv6: "false" + enable-ipv6-big-tcp: "false" + enable-ipv6-masquerade: "false" + enable-k8s-networkpolicy: "true" + enable-k8s-terminating-endpoint: "true" + enable-l2-neigh-discovery: "true" + enable-l7-proxy: "true" + enable-lb-ipam: "false" + enable-local-node-route: "false" + enable-local-redirect-policy: "true" + enable-masquerade-to-route-source: "false" + 
enable-metrics: "true" + enable-node-selector-labels: "false" + enable-non-default-deny-policies: "true" + enable-policy: default + enable-remote-node-identity: "true" + enable-remote-node-masquerade: "false" + enable-runtime-device-detection: "false" + enable-sctp: "false" + enable-session-affinity: "true" + enable-source-ip-verification: "false" + enable-standalone-dns-proxy: "true" + enable-svc-source-range-check: "true" + enable-tcx: "false" + enable-vtep: "false" + enable-well-known-identities: "false" + enable-wireguard: "false" + enable-xt-socket-fallback: "true" + external-envoy-proxy: "false" + health-check-icmp-failure-threshold: "3" + hubble-disable-tls: "false" + hubble-event-buffer-capacity: "4095" + hubble-export-file-max-backups: "5" + hubble-export-file-max-size-mb: "10" + hubble-flowlogs-config-path: /flowlog-config/flowlogs.yaml + hubble-listen-address: :4244 + hubble-metrics: flow:sourceEgressContext=pod;destinationIngressContext=pod tcp:sourceEgressContext=pod;destinationIngressContext=pod + drop:sourceEgressContext=pod;destinationIngressContext=pod dns:sourceEgressContext=pod;destinationIngressContext=pod + hubble-metrics-server: :9965 + hubble-socket-path: /var/run/cilium/hubble.sock + hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt + hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt + hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key + identity-allocation-mode: crd + install-iptables-rules: "true" + install-no-conntrack-iptables-rules: "false" + ipam: delegated-plugin + ipam-cilium-node-update-rate: 15s + ipv4-native-routing-cidr: 10.241.0.0/16 + k8s-client-burst: "20" + k8s-client-qps: "10" + k8s-require-ipv4-pod-cidr: "false" + k8s-require-ipv6-pod-cidr: "false" + kube-proxy-replacement: "true" + kube-proxy-replacement-healthz-bind-address: 0.0.0.0:10256 + local-router-ipv4: 169.254.23.0 + mesh-auth-enabled: "false" + mesh-auth-gc-interval: 5m0s + mesh-auth-queue-size: "1024" + 
mesh-auth-rotated-identities-queue-size: "1024" + metrics: +cilium_bpf_map_pressure +cilium_proxy_datapath_update_timeout_total + monitor-aggregation: medium + monitor-aggregation-flags: all + monitor-aggregation-interval: 5s + nat-map-stats-entries: "32" + nat-map-stats-interval: 30s + node-port-bind-protection: "true" + nodeport-addresses: "" + nodes-gc-interval: 5m0s + operator-api-serve-addr: 127.0.0.1:9234 + operator-prometheus-serve-addr: :9963 + preallocate-bpf-maps: "false" + procfs: /host/proc + prometheus-serve-addr: :9962 + proxy-connect-timeout: "2" + proxy-idle-timeout-seconds: "60" + proxy-max-connection-duration-seconds: "0" + proxy-max-requests-per-connection: "0" + proxy-xff-num-trusted-hops-egress: "0" + proxy-xff-num-trusted-hops-ingress: "0" + remove-cilium-node-taints: "true" + routing-mode: native + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" + sidecar-istio-proxy-image: cilium/istio_proxy + synchronize-k8s-nodes: "true" + tofqdns-dns-reject-response-code: refused + tofqdns-enable-dns-compression: "true" + tofqdns-endpoint-max-ip-per-hostname: "1000" + tofqdns-idle-connection-grace-period: 0s + tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "3600" + tofqdns-proxy-port: "40046" + tofqdns-proxy-response-max-delay: 100ms + tofqdns-server-port: "40045" + unmanaged-pod-watcher-interval: "0" + vtep-cidr: "" + vtep-endpoint: "" + vtep-mac: "" + vtep-mask: "" +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/actually-managed-by: Eno + app.kubernetes.io/managed-by: Helm + name: cilium-config + namespace: kube-system diff --git a/test/validate/linux_validate.go b/test/validate/linux_validate.go index 0f38a4c718..dccae9cff7 100644 --- a/test/validate/linux_validate.go +++ b/test/validate/linux_validate.go @@ -44,6 +44,7 @@ var linuxChecksMap = map[string][]check{ podLabelSelector: ciliumLabelSelector, 
podNamespace: privilegedNamespace, cmd: ciliumStateFileCmd, + containerName: "cilium-agent", }, { name: "cns cache", From 082edc7d4ff3865b005e81f6f7bdf52614fabb86 Mon Sep 17 00:00:00 2001 From: aggarwal0009 <127549148+aggarwal0009@users.noreply.github.com> Date: Wed, 15 Oct 2025 10:56:04 -0700 Subject: [PATCH 07/47] Add status to mtpnc crd (#4070) * add status to mtpnc crd * update mtpnc status * update mtpnc status only * fix lint formating errs * add crd manifest * update mtpnc statuses * fix status enum * address pr feedback * fix lint * add missing file --- .../api/v1alpha1/multitenantpodnetworkconfig.go | 17 +++++++++++++++++ crd/multitenancy/embed_test.go | 8 ++++---- ....azure.com_multitenantpodnetworkconfigs.yaml | 11 +++++++++++ 3 files changed, 32 insertions(+), 4 deletions(-) diff --git a/crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go b/crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go index f1f76b5d4c..099e510419 100644 --- a/crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go +++ b/crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go @@ -63,6 +63,20 @@ const ( Failed InfinibandStatus = "Failed" ) +// MTPNCStatus indicates the high-level status of MultitenantPodNetworkConfig +// +kubebuilder:validation:Enum=Ready;Pending;InternalError;PNINotFound;PNINotReady;NodeCapacityExceeded;IPsExhausted +type MTPNCStatus string + +const ( + MTPNCStatusReady MTPNCStatus = "Ready" + MTPNCStatusPending MTPNCStatus = "Pending" + MTPNCStatusInternalError MTPNCStatus = "InternalError" + MTPNCStatusPNINotFound MTPNCStatus = "PNINotFound" + MTPNCStatusPNINotReady MTPNCStatus = "PNINotReady" + MTPNCStatusNodeCapacityExceeded MTPNCStatus = "NodeCapacityExceeded" + MTPNCStatusIPsExhausted MTPNCStatus = "IPsExhausted" +) + type InterfaceInfo struct { // NCID is the network container id NCID string `json:"ncID,omitempty"` @@ -110,6 +124,9 @@ type MultitenantPodNetworkConfigStatus struct { // NodeName is the name of the node where the 
pod is scheduled // +kubebuilder:validation:Optional NodeName string `json:"nodeName,omitempty"` + // Status represents the overall status of the MTPNC + // +kubebuilder:validation:Optional + Status MTPNCStatus `json:"status,omitempty"` } func init() { diff --git a/crd/multitenancy/embed_test.go b/crd/multitenancy/embed_test.go index d0ccc93841..d22ed35826 100644 --- a/crd/multitenancy/embed_test.go +++ b/crd/multitenancy/embed_test.go @@ -12,7 +12,7 @@ const mtpncFilename = "manifests/multitenancy.acn.azure.com_multitenantpodnetwor func TestEmbedMTPNC(t *testing.T) { b, err := os.ReadFile(mtpncFilename) assert.NoError(t, err) - assert.Equal(t, b, MultitenantPodNetworkConfigsYAML) + assert.YAMLEq(t, string(b), string(MultitenantPodNetworkConfigsYAML)) } func TestGetMultitenantPodNetworkConfigs(t *testing.T) { @@ -25,7 +25,7 @@ const nodeinfoFilename = "manifests/multitenancy.acn.azure.com_nodeinfo.yaml" func TestEmbedNodeInfo(t *testing.T) { b, err := os.ReadFile(nodeinfoFilename) assert.NoError(t, err) - assert.Equal(t, b, NodeInfoYAML) + assert.YAMLEq(t, string(b), string(NodeInfoYAML)) } func TestGetNodeInfo(t *testing.T) { @@ -38,7 +38,7 @@ const podNetworkFilename = "manifests/multitenancy.acn.azure.com_podnetworks.yam func TestEmbedPodNetwork(t *testing.T) { b, err := os.ReadFile(podNetworkFilename) assert.NoError(t, err) - assert.Equal(t, b, PodNetworkYAML) + assert.YAMLEq(t, string(b), string(PodNetworkYAML)) } func TestGetPodNetworks(t *testing.T) { @@ -51,7 +51,7 @@ const podNetworkInstanceFilename = "manifests/multitenancy.acn.azure.com_podnetw func TestEmbedPodNetworkInstance(t *testing.T) { b, err := os.ReadFile(podNetworkInstanceFilename) assert.NoError(t, err) - assert.Equal(t, b, PodNetworkInstanceYAML) + assert.YAMLEq(t, string(b), string(PodNetworkInstanceYAML)) } func TestGetPodNetworkInstances(t *testing.T) { diff --git a/crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml 
b/crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml index 9d754a9773..e539ed4b24 100644 --- a/crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml +++ b/crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml @@ -144,6 +144,17 @@ spec: primaryIP: description: Deprecated - use InterfaceInfos type: string + status: + description: Status represents the overall status of the MTPNC + enum: + - Ready + - Pending + - InternalError + - PNINotFound + - PNINotReady + - NodeCapacityExceeded + - IPsExhausted + type: string type: object type: object served: true From 6d5522655714b0deac636fda6e6fb8b1b8050dcd Mon Sep 17 00:00:00 2001 From: rejain456 <155685406+rejain456@users.noreply.github.com> Date: Mon, 20 Oct 2025 20:30:48 -0700 Subject: [PATCH 08/47] CNS Change for Subnet Overlay Expansion Job (#4074) * added logic to fix cns bug for overlay subnet expansion * reverted a line change * fixed spelling * added unit test * fixing go lint * expanded on a comment * updated logic * updated test * updated validate superset logic * updated to return bool instead of error for checking cidr superset * updated logic to check for containment --------- Co-authored-by: Riya Co-authored-by: Riya --- cns/restserver/internalapi.go | 45 +++++++++-- cns/restserver/internalapi_test.go | 124 +++++++++++++++++++++++------ 2 files changed, 137 insertions(+), 32 deletions(-) diff --git a/cns/restserver/internalapi.go b/cns/restserver/internalapi.go index efefb3f2d3..de52c3e050 100644 --- a/cns/restserver/internalapi.go +++ b/cns/restserver/internalapi.go @@ -11,6 +11,7 @@ import ( "net" "net/http" "net/http/httptest" + "net/netip" "reflect" "strconv" "strings" @@ -630,13 +631,17 @@ func (service *HTTPRestService) CreateOrUpdateNetworkContainerInternal(req *cns. 
if ok { existingReq := existingNCInfo.CreateNetworkContainerRequest if !reflect.DeepEqual(existingReq.IPConfiguration.IPSubnet, req.IPConfiguration.IPSubnet) { - logger.Errorf("[Azure CNS] Error. PrimaryCA is not same, NCId %s, old CA %s/%d, new CA %s/%d", - req.NetworkContainerid, - existingReq.IPConfiguration.IPSubnet.IPAddress, - existingReq.IPConfiguration.IPSubnet.PrefixLength, - req.IPConfiguration.IPSubnet.IPAddress, - req.IPConfiguration.IPSubnet.PrefixLength) - return types.PrimaryCANotSame + // check for potential overlay subnet expansion - checking if new subnet is a superset of old subnet + isCIDRSuperset := validateCIDRSuperset(req.IPConfiguration.IPSubnet.IPAddress, existingReq.IPConfiguration.IPSubnet.IPAddress) + if !isCIDRSuperset { + logger.Errorf("[Azure CNS] Error. PrimaryCA is not same, NCId %s, old CA %s/%d, new CA %s/%d", //nolint:staticcheck // Suppress SA1019: logger.Errorf is deprecated + req.NetworkContainerid, + existingReq.IPConfiguration.IPSubnet.IPAddress, + existingReq.IPConfiguration.IPSubnet.PrefixLength, + req.IPConfiguration.IPSubnet.IPAddress, + req.IPConfiguration.IPSubnet.PrefixLength) + return types.PrimaryCANotSame + } } } @@ -722,3 +727,29 @@ func (service *HTTPRestService) GetIMDSNCs(ctx context.Context) (map[string]stri return ncs, nil } + +// validateCIDRSuperset returns true if newCIDR is a superset of oldCIDR (i.e., all IPs in oldCIDR are contained in newCIDR). 
+func validateCIDRSuperset(newCIDR, oldCIDR string) bool { + // Parse newCIDR and oldCIDR into netip.Prefix + newPrefix, err := netip.ParsePrefix(newCIDR) + if err != nil { + return false + } + + oldPrefix, err := netip.ParsePrefix(oldCIDR) + if err != nil { + return false + } + + // Condition 1: Check if the new prefix length is smaller (larger range) than the old prefix length + if newPrefix.Bits() >= oldPrefix.Bits() { + return false + } + + // Condition 2: Check for Overlap - this will also ensure containment + if !newPrefix.Overlaps(oldPrefix) { + return false + } + + return true +} diff --git a/cns/restserver/internalapi_test.go b/cns/restserver/internalapi_test.go index 4df797a498..81ccc85154 100644 --- a/cns/restserver/internalapi_test.go +++ b/cns/restserver/internalapi_test.go @@ -66,39 +66,113 @@ func TestReconcileNCStatePrimaryIPChangeShouldFail(t *testing.T) { setOrchestratorTypeInternal(cns.KubernetesCRD) svc.state.ContainerStatus = make(map[string]containerstatus) - // start with a NC in state - ncID := "555ac5c9-89f2-4b5d-b8d0-616894d6d151" - svc.state.ContainerStatus[ncID] = containerstatus{ - ID: ncID, - VMVersion: "0", - HostVersion: "0", - CreateNetworkContainerRequest: cns.CreateNetworkContainerRequest{ - NetworkContainerid: ncID, - IPConfiguration: cns.IPConfiguration{ - IPSubnet: cns.IPSubnet{ - IPAddress: "10.0.1.0", - PrefixLength: 24, + testCases := []struct { + existingIPAddress string + requestIPAddress string + }{ + {"", "10.240.0.0/16"}, + {"10.240.0.0", "2001:db8::/64"}, + {"2001:db8::/64", "10.240.0.0/16"}, + {"10.0.1.0/22", "10.0.2.0/24"}, + {"10.0.1.0/21", "10.0.1.0/23"}, + {"10.0.1.0", "10.0.0.0/15"}, + {"10.0.1.0/15", "10.0.0.0"}, + } + + // Run test cases + for _, tc := range testCases { + // start with a NC in state + ncID := "555ac5c9-89f2-4b5d-b8d0-616894d6d150" + svc.state.ContainerStatus[ncID] = containerstatus{ + ID: ncID, + VMVersion: "0", + HostVersion: "0", + CreateNetworkContainerRequest: 
cns.CreateNetworkContainerRequest{ + NetworkContainerid: ncID, + IPConfiguration: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: tc.existingIPAddress, + PrefixLength: 24, + }, }, }, - }, - } + } - ncReqs := []*cns.CreateNetworkContainerRequest{ - { - NetworkContainerid: ncID, - IPConfiguration: cns.IPConfiguration{ - IPSubnet: cns.IPSubnet{ - IPAddress: "10.0.2.0", // note this IP has changed - PrefixLength: 24, + ncReqs := []*cns.CreateNetworkContainerRequest{ + { + NetworkContainerid: ncID, + IPConfiguration: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: tc.requestIPAddress, + PrefixLength: 24, + }, }, }, - }, + } + + // now try to reconcile the state where the NC primary IP has changed + resp := svc.ReconcileIPAMStateForSwift(ncReqs, map[string]cns.PodInfo{}, &v1alpha.NodeNetworkConfig{}) + + assert.Equal(t, types.PrimaryCANotSame, resp) } - // now try to reconcile the state where the NC primary IP has changed - resp := svc.ReconcileIPAMStateForSwift(ncReqs, map[string]cns.PodInfo{}, &v1alpha.NodeNetworkConfig{}) +} + +// TestReconcileNCStatePrimaryIPChangeShouldNotFail tests that reconciling NC state with +// a NC whose IP has changed should not fail if new IP is superset of old IP +func TestReconcileNCStatePrimaryIPChangeShouldNotFail(t *testing.T) { + restartService() + setEnv(t) + setOrchestratorTypeInternal(cns.KubernetesCRD) + svc.state.ContainerStatus = make(map[string]containerstatus) + + testCases := []struct { + existingIPAddress string + requestIPAddress string + }{ + {"10.0.1.0/24", "10.0.2.0/22"}, + {"10.0.1.0/20", "10.0.1.0/18"}, + {"10.0.1.0/19", "10.0.0.0/15"}, + {"10.0.1.0/18", "10.0.1.0/18"}, + } + + // Run test cases + for _, tc := range testCases { + // start with a NC in state + ncID := "555ac5c9-89f2-4b5d-b8d0-616894d6d150" + svc.state.ContainerStatus[ncID] = containerstatus{ + ID: ncID, + VMVersion: "0", + HostVersion: "0", + CreateNetworkContainerRequest: cns.CreateNetworkContainerRequest{ + NetworkContainerid: 
ncID, + IPConfiguration: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: tc.existingIPAddress, + PrefixLength: 24, + }, + }, + }, + } + + ncReqs := []*cns.CreateNetworkContainerRequest{ + { + NetworkContainerid: ncID, + IPConfiguration: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: tc.requestIPAddress, + PrefixLength: 24, + }, + }, + NetworkContainerType: cns.Kubernetes, + }, + } - assert.Equal(t, types.PrimaryCANotSame, resp) + // now try to reconcile the state where the NC primary IP has changed + resp := svc.ReconcileIPAMStateForSwift(ncReqs, map[string]cns.PodInfo{}, &v1alpha.NodeNetworkConfig{}) + + assert.Equal(t, types.Success, resp) + } } // TestReconcileNCStateGatewayChange tests that NC state gets updated when reconciled From 6f83142588717994333a7a8dc55b2d0cf47340b9 Mon Sep 17 00:00:00 2001 From: Paul Yu <129891899+paulyufan2@users.noreply.github.com> Date: Wed, 22 Oct 2025 08:27:40 -0400 Subject: [PATCH 09/47] chore: update dependencies for CVEs (#4093) chore: fix cns vulneribility issues Co-authored-by: paulyu --- go.mod | 16 +++++++++------- go.sum | 36 ++++++++++++++++++------------------ 2 files changed, 27 insertions(+), 25 deletions(-) diff --git a/go.mod b/go.mod index 2d540cc1a3..7559aa60ff 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,7 @@ require ( go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 golang.org/x/sys v0.36.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect google.golang.org/grpc v1.76.0 google.golang.org/protobuf v1.36.10 gopkg.in/natefinch/lumberjack.v2 v2.2.1 @@ -160,7 +160,7 @@ require ( github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - 
github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/sasha-s/go-deadlock v0.3.1 // indirect github.com/shirou/gopsutil/v3 v3.23.5 // indirect github.com/tklauser/go-sysconf v0.3.11 // indirect @@ -168,10 +168,12 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.mongodb.org/mongo-driver v1.13.1 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel v1.37.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/trace v1.37.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.38.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect go.uber.org/dig v1.17.1 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect @@ -184,7 +186,7 @@ require ( require ( github.com/containerd/cgroups/v3 v3.0.3 // indirect github.com/containerd/errdefs v0.3.0 // indirect - github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/emicklei/go-restful/v3 v3.13.0 // indirect github.com/golang-jwt/jwt/v5 v5.3.0 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect diff --git a/go.sum b/go.sum index 8dd5bf9a9f..7bf8095686 100644 --- a/go.sum +++ b/go.sum @@ -99,8 +99,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1/go.mod 
h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= -github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= +github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -335,8 +335,8 @@ github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+ github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= @@ -410,18 +410,18 @@ go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/ go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= go.opencensus.io 
v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= -go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= -go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= -go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= -go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= -go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= -go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= -go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= -go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= -go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 
h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= @@ -555,8 +555,8 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b h1:ULiyYQ0FdsJhwwZUwbaXpZF5yUE3h+RA+gxvBu37ucc= google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:oDOGiMSXHL4sDTJvFvIB9nRQCGdLP1o/iVaqQK8zB+M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b h1:zPKJod4w6F1+nRGDI9ubnXYhU9NSWoFAijkHkUXeTK8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= From 4cf2e206b0cd27ea615eaf7457da6a9bc48e6979 Mon Sep 17 00:00:00 2001 From: Paul Yu <129891899+paulyufan2@users.noreply.github.com> Date: Wed, 22 Oct 2025 10:52:21 -0400 Subject: 
[PATCH 10/47] downgrade easyjson GO package to unblock Release build (#4084) downgrade easyjson GO package Co-authored-by: paulyu --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7559aa60ff..15a89be24e 100644 --- a/go.mod +++ b/go.mod @@ -75,7 +75,7 @@ require ( github.com/kylelemons/godebug v1.1.0 // indirect github.com/labstack/echo/v4 v4.13.4 github.com/labstack/gommon v0.4.2 // indirect - github.com/mailru/easyjson v0.9.1 // indirect + github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect diff --git a/go.sum b/go.sum index 7bf8095686..bfd0ad7ded 100644 --- a/go.sum +++ b/go.sum @@ -265,8 +265,8 @@ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mailru/easyjson v0.9.1 h1:LbtsOm5WAswyWbvTEOqhypdPeZzHavpZx96/n553mR8= -github.com/mailru/easyjson v0.9.1/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= From 0f61b26a566e9b65961c364f37909eeea55aea7f Mon Sep 17 00:00:00 2001 From: Mugesh <51223172+mugeshsp@users.noreply.github.com> Date: Wed, 22 Oct 2025 16:55:50 +0100 
Subject: [PATCH 11/47] enable dual NIC support in transparent VLAN (#4057) * feat: add SkipDefaultRoutes field to network container request and responses * feat: implement ARP proxy setting and custom route addition for VLAN interfaces * feat: enable dual NIC feature support and improve ARP proxy handling in transparent VLAN client * feat: add tests for SkipDefaultRoutes handling in network container requests * feat: remove addition of custom routes * fix: improve ARP proxy error handling * fix: Lint Errors * refactor: streamline ARP proxy setup in AddEndpointRules * fix: update comments for dual NIC support and clarify ARP proxy function --------- Co-authored-by: Mugesh SP --- cni/network/multitenancy.go | 1 + cni/network/network_linux.go | 4 +- cns/NetworkContainerContract.go | 6 +- cns/NetworkContainerContract_test.go | 37 ++++++++++ cns/restserver/util.go | 4 ++ .../transparent_vlan_endpointclient_linux.go | 27 +++++++- ...nsparent_vlan_endpointclient_linux_test.go | 68 +++++++++++++++++++ 7 files changed, 141 insertions(+), 6 deletions(-) diff --git a/cni/network/multitenancy.go b/cni/network/multitenancy.go index 67013863bd..e617afb7d6 100644 --- a/cni/network/multitenancy.go +++ b/cni/network/multitenancy.go @@ -230,6 +230,7 @@ func (m *Multitenancy) GetAllNetworkContainers( ifInfo.IPConfigs = append(ifInfo.IPConfigs, ipconfig) ifInfo.Routes = routes ifInfo.NICType = cns.InfraNIC + ifInfo.SkipDefaultRoutes = ncResponses[i].SkipDefaultRoutes // assuming we only assign infra nics in this function ipamResult.interfaceInfo[m.getInterfaceInfoKey(ifInfo.NICType, i)] = ifInfo diff --git a/cni/network/network_linux.go b/cni/network/network_linux.go index 2b090523c0..ba146c3a96 100644 --- a/cni/network/network_linux.go +++ b/cni/network/network_linux.go @@ -125,9 +125,9 @@ func getNATInfo(_ *cni.NetworkConfig, _ interface{}, _ bool) (natInfo []policy.N func platformInit(cniConfig *cni.NetworkConfig) {} -// isDualNicFeatureSupported returns if the dual nic feature is 
supported. Currently it's only supported for windows hnsv2 path +// isDualNicFeatureSupported returns true as the dual nic feature is supported on Linux. func (plugin *NetPlugin) isDualNicFeatureSupported(netNs string) bool { - return false + return true } func getOverlayGateway(_ *net.IPNet) (net.IP, error) { diff --git a/cns/NetworkContainerContract.go b/cns/NetworkContainerContract.go index 1795bb607b..3bbacb6558 100644 --- a/cns/NetworkContainerContract.go +++ b/cns/NetworkContainerContract.go @@ -126,6 +126,7 @@ type CreateNetworkContainerRequest struct { Routes []Route AllowHostToNCCommunication bool AllowNCToHostCommunication bool + SkipDefaultRoutes bool EndpointPolicies []NetworkContainerRequestPolicies NCStatus v1alpha.NCStatus NetworkInterfaceInfo NetworkInterfaceInfo //nolint // introducing new field for backendnic, to be used later by cni code @@ -161,10 +162,10 @@ func (req *CreateNetworkContainerRequest) String() string { return fmt.Sprintf("CreateNetworkContainerRequest"+ "{Version: %s, NetworkContainerType: %s, NetworkContainerid: %s, PrimaryInterfaceIdentifier: %s, "+ "LocalIPConfiguration: %+v, IPConfiguration: %+v, SecondaryIPConfigs: %+v, MultitenancyInfo: %+v, "+ - "AllowHostToNCCommunication: %t, AllowNCToHostCommunication: %t, NCStatus: %s, NetworkInterfaceInfo: %+v}", + "AllowHostToNCCommunication: %t, AllowNCToHostCommunication: %t, SkipDefaultRoutes: %t, NCStatus: %s, NetworkInterfaceInfo: %+v}", req.Version, req.NetworkContainerType, req.NetworkContainerid, req.PrimaryInterfaceIdentifier, req.LocalIPConfiguration, req.IPConfiguration, req.SecondaryIPConfigs, req.MultiTenancyInfo, req.AllowHostToNCCommunication, req.AllowNCToHostCommunication, - string(req.NCStatus), req.NetworkInterfaceInfo) + req.SkipDefaultRoutes, string(req.NCStatus), req.NetworkInterfaceInfo) } // NetworkContainerRequestPolicies - specifies policies associated with create network request @@ -499,6 +500,7 @@ type GetNetworkContainerResponse struct { Response Response 
AllowHostToNCCommunication bool AllowNCToHostCommunication bool + SkipDefaultRoutes bool NetworkInterfaceInfo NetworkInterfaceInfo } diff --git a/cns/NetworkContainerContract_test.go b/cns/NetworkContainerContract_test.go index fc17a58a4d..28cfa8fe9e 100644 --- a/cns/NetworkContainerContract_test.go +++ b/cns/NetworkContainerContract_test.go @@ -240,3 +240,40 @@ func TestPostNetworkContainersRequest_Validate(t *testing.T) { }) } } + +func TestCreateNetworkContainerRequest_SkipDefaultRoutes(t *testing.T) { + tests := []struct { + name string + req CreateNetworkContainerRequest + expected bool + }{ + { + name: "SkipDefaultRoutesTrue", + req: CreateNetworkContainerRequest{ + NetworkContainerid: "f47ac10b-58cc-0372-8567-0e02b2c3d479", + SkipDefaultRoutes: true, + }, + expected: true, + }, + { + name: "SkipDefaultRoutesFalse", + req: CreateNetworkContainerRequest{ + NetworkContainerid: "f47ac10b-58cc-0372-8567-0e02b2c3d479", + SkipDefaultRoutes: false, + }, + expected: false, + }, + { + name: "SkipDefaultRoutesIgnored", + req: CreateNetworkContainerRequest{ + NetworkContainerid: "f47ac10b-58cc-0372-8567-0e02b2c3d479", + }, + expected: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expected, tt.req.SkipDefaultRoutes, "SkipDefaultRoutes value should match expected") + }) + } +} diff --git a/cns/restserver/util.go b/cns/restserver/util.go index e95d3c535b..6a17aaf8b1 100644 --- a/cns/restserver/util.go +++ b/cns/restserver/util.go @@ -531,6 +531,7 @@ func (service *HTTPRestService) getAllNetworkContainerResponses( LocalIPConfiguration: savedReq.LocalIPConfiguration, AllowHostToNCCommunication: savedReq.AllowHostToNCCommunication, AllowNCToHostCommunication: savedReq.AllowNCToHostCommunication, + SkipDefaultRoutes: savedReq.SkipDefaultRoutes, NetworkInterfaceInfo: savedReq.NetworkInterfaceInfo, } @@ -832,6 +833,8 @@ func (service *HTTPRestService) populateIPConfigInfoUntransacted(ipConfigStatus primaryIPCfg := 
ncStatus.CreateNetworkContainerRequest.IPConfiguration + podIPInfo.SkipDefaultRoutes = ncStatus.CreateNetworkContainerRequest.SkipDefaultRoutes + podIPInfo.PodIPConfig = cns.IPSubnet{ IPAddress: ipConfigStatus.IPAddress, PrefixLength: primaryIPCfg.IPSubnet.PrefixLength, @@ -935,6 +938,7 @@ func (service *HTTPRestService) handleGetNetworkContainers(w http.ResponseWriter LocalIPConfiguration: ncDetails.CreateNetworkContainerRequest.LocalIPConfiguration, AllowHostToNCCommunication: ncDetails.CreateNetworkContainerRequest.AllowHostToNCCommunication, AllowNCToHostCommunication: ncDetails.CreateNetworkContainerRequest.AllowNCToHostCommunication, + SkipDefaultRoutes: ncDetails.CreateNetworkContainerRequest.SkipDefaultRoutes, } networkContainers[i] = getNcResp i++ diff --git a/network/transparent_vlan_endpointclient_linux.go b/network/transparent_vlan_endpointclient_linux.go index 326fc0c87e..227ed29ca4 100644 --- a/network/transparent_vlan_endpointclient_linux.go +++ b/network/transparent_vlan_endpointclient_linux.go @@ -292,7 +292,6 @@ func (client *TransparentVlanEndpointClient) PopulateVM(epInfo *EndpointInfo) er _, err = client.netioshim.GetNetworkInterfaceByName(client.vlanIfName) return errors.Wrap(err, "failed to get vlan interface") }, numRetries, sleepInMs) - if err != nil { deleteNSIfNotNilErr = errors.Wrapf(err, "failed to get vlan interface: %s", client.vlanIfName) return deleteNSIfNotNilErr @@ -400,14 +399,32 @@ func (client *TransparentVlanEndpointClient) PopulateVnet(epInfo *EndpointInfo) return nil } +// Set ARP proxy on the specified interface to respond to ARP requests for the gateway IP +func (client *TransparentVlanEndpointClient) setArpProxy(ifName string) error { + cmd := fmt.Sprintf("echo 1 > /proc/sys/net/ipv4/conf/%v/proxy_arp", ifName) + _, err := client.plClient.ExecuteRawCommand(cmd) + if err != nil { + logger.Error("Failed to set ARP proxy", zap.String("interface", ifName), zap.Error(err)) + return errors.Wrap(err, "failed to set arp proxy") + 
} + return nil +} + func (client *TransparentVlanEndpointClient) AddEndpointRules(epInfo *EndpointInfo) error { if err := client.AddSnatEndpointRules(); err != nil { return errors.Wrap(err, "failed to add snat endpoint rules") } logger.Info("[transparent-vlan] Adding tunneling rules in vnet namespace") err := ExecuteInNS(client.nsClient, client.vnetNSName, func() error { - return client.AddVnetRules(epInfo) + if err := client.AddVnetRules(epInfo); err != nil { + return err + } + + // Set ARP proxy on vnet veth (inside vnet namespace) + logger.Info("calling setArpProxy for", zap.String("vnetVethName", client.vnetVethName)) + return client.setArpProxy(client.vnetVethName) }) + return err } @@ -519,9 +536,15 @@ func (client *TransparentVlanEndpointClient) ConfigureContainerInterfacesAndRout } } + if epInfo.SkipDefaultRoutes { + logger.Info("Skipping adding routes in container ns as requested") + return nil + } + logger.Info("Adding default routes in container ns") if err := client.addDefaultRoutes(client.containerVethName, 0); err != nil { return errors.Wrap(err, "failed container ns add default routes") } + if err := client.AddDefaultArp(client.containerVethName, client.vnetMac.String()); err != nil { return errors.Wrap(err, "failed container ns add default arp") } diff --git a/network/transparent_vlan_endpointclient_linux_test.go b/network/transparent_vlan_endpointclient_linux_test.go index 79e70adb98..3639608dbd 100644 --- a/network/transparent_vlan_endpointclient_linux_test.go +++ b/network/transparent_vlan_endpointclient_linux_test.go @@ -867,6 +867,74 @@ func TestTransparentVlanConfigureContainerInterfacesAndRoutes(t *testing.T) { wantErr: true, wantErrMsg: "failed container ns add default routes: addRoutes failed: " + netio.ErrMockNetIOFail.Error() + ":B1veth0", }, + { + name: "Configure interface and routes good path with SkipDefaultRoutes set to true for container", + client: &TransparentVlanEndpointClient{ + primaryHostIfName: "eth0", + vlanIfName: "eth0.1", 
+ vnetVethName: "A1veth0", + containerVethName: "B1veth0", + vnetNSName: "az_ns_1", + vnetMac: vnetMac, + netlink: netlink.NewMockNetlink(false, ""), + plClient: platform.NewMockExecClient(false), + netUtilsClient: networkutils.NewNetworkUtils(nl, plc), + netioshim: netio.NewMockNetIO(false, 0), + }, + epInfo: &EndpointInfo{ + SkipDefaultRoutes: true, + IPAddresses: []net.IPNet{ + { + IP: net.ParseIP("192.168.0.4"), + Mask: net.CIDRMask(subnetv4Mask, ipv4Bits), + }, + }, + Subnets: []SubnetInfo{ + { + Gateway: net.ParseIP("192.168.0.1"), + Prefix: net.IPNet{ + IP: net.ParseIP("192.168.0.0"), + Mask: net.CIDRMask(subnetv4Mask, ipv4Bits), + }, + }, + }, + }, + wantErr: false, + }, + { + name: "Configure interface and routes good path with SkipDefaultRoutes set to false for container", + client: &TransparentVlanEndpointClient{ + primaryHostIfName: "eth0", + vlanIfName: "eth0.1", + vnetVethName: "A1veth0", + containerVethName: "B1veth0", + vnetNSName: "az_ns_1", + vnetMac: vnetMac, + netlink: netlink.NewMockNetlink(false, ""), + plClient: platform.NewMockExecClient(false), + netUtilsClient: networkutils.NewNetworkUtils(nl, plc), + netioshim: netio.NewMockNetIO(false, 0), + }, + epInfo: &EndpointInfo{ + SkipDefaultRoutes: true, + IPAddresses: []net.IPNet{ + { + IP: net.ParseIP("192.168.0.4"), + Mask: net.CIDRMask(subnetv4Mask, ipv4Bits), + }, + }, + Subnets: []SubnetInfo{ + { + Gateway: net.ParseIP("192.168.0.1"), + Prefix: net.IPNet{ + IP: net.ParseIP("192.168.0.0"), + Mask: net.CIDRMask(subnetv4Mask, ipv4Bits), + }, + }, + }, + }, + wantErr: false, + }, } for _, tt := range tests { From 8092b0fef0c7ad88c53a42caef8a5f6a25345a8a Mon Sep 17 00:00:00 2001 From: shreyashastantram <105284415+shreyashastantram@users.noreply.github.com> Date: Wed, 22 Oct 2025 10:28:35 -0700 Subject: [PATCH 12/47] fix: Adding delete timestamp check (#4078) * Adding delete timestamp check * Adding UT and refactoring getmtpnc * Update cns/middlewares/k8sSwiftV2_linux_test.go Co-authored-by: 
Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: shreyashastantram <105284415+shreyashastantram@users.noreply.github.com> * Changing the MTPNC error message * Removing unused fields * Revert manifest changes for CRD files * Fixing error message and adding comments * Revert CRD API files to master state * fix: correcting comment assertion --------- Signed-off-by: shreyashastantram <105284415+shreyashastantram@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- cns/middlewares/k8sSwiftV2.go | 5 +- cns/middlewares/k8sSwiftV2_linux_test.go | 10 +++ cns/middlewares/mock/mockClient.go | 99 ++++++++++++++---------- crd/multitenancy/api/v1alpha1/utils.go | 6 ++ 4 files changed, 77 insertions(+), 43 deletions(-) diff --git a/cns/middlewares/k8sSwiftV2.go b/cns/middlewares/k8sSwiftV2.go index 8805996060..835c552dbc 100644 --- a/cns/middlewares/k8sSwiftV2.go +++ b/cns/middlewares/k8sSwiftV2.go @@ -28,6 +28,7 @@ var ( errGetMTPNC = errors.New(NetworkNotReadyErrorMsg + " - failed to get MTPNC") errInvalidSWIFTv2NICType = errors.New("invalid NIC type for SWIFT v2 scenario") errInvalidMTPNCPrefixLength = errors.New("invalid prefix length for MTPNC primaryIP, must be 32") + errMTPNCDeleting = errors.New(NetworkNotReadyErrorMsg + " - mtpnc for previous pod is being deleted, waiting for new mtpnc to be ready") ) type K8sSWIFTv2Middleware struct { @@ -54,7 +55,9 @@ func (k *K8sSWIFTv2Middleware) GetPodInfoForIPConfigsRequest(ctx context.Context if respCode != types.Success { return nil, respCode, message } - + if mtpnc.IsDeleting() { + return nil, types.UnexpectedError, errMTPNCDeleting.Error() + } // update ipConfigRequest respCode, message = k.UpdateIPConfigRequest(mtpnc, req) if respCode != types.Success { diff --git a/cns/middlewares/k8sSwiftV2_linux_test.go b/cns/middlewares/k8sSwiftV2_linux_test.go index 52b257acfb..c5d84b5865 100644 --- a/cns/middlewares/k8sSwiftV2_linux_test.go +++ 
b/cns/middlewares/k8sSwiftV2_linux_test.go @@ -41,6 +41,9 @@ var ( testPod9GUID = "2006cad4-e54d-472e-863d-c4bac66200a7" testPod9Info = cns.NewPodInfo("2006cad4-eth0", testPod9GUID, "testpod9", "testpod9namespace") + + testPodMtpncTerminatingGUID = "e3b0c442-98fc-1fc1-9b93-7a1c2e5c8e6f" + testPodMtpncTerminatingInfo = cns.NewPodInfo("2006cad4-eth0", testPodMtpncTerminatingGUID, "testpodMtpncTerminating", "testpodMtpncTerminatingnamespace") ) func TestMain(m *testing.M) { @@ -217,6 +220,13 @@ func TestValidateMultitenantIPConfigsRequestFailure(t *testing.T) { _, respCode, msg = middleware.GetPodInfoForIPConfigsRequest(context.TODO(), failReq) assert.Equal(t, respCode, types.UnexpectedError) assert.Assert(t, strings.Contains(msg, NetworkNotReadyErrorMsg), "expected error message to contain '%s', got '%s'", NetworkNotReadyErrorMsg, msg) + + // Delete Timestamp is set + b, _ = testPodMtpncTerminatingInfo.OrchestratorContext() + failReq.OrchestratorContext = b + _, respCode, msg = middleware.GetPodInfoForIPConfigsRequest(context.TODO(), failReq) + assert.Equal(t, respCode, types.UnexpectedError) + assert.Assert(t, strings.Contains(msg, NetworkNotReadyErrorMsg), "expected error message to contain '%s', got '%s'", NetworkNotReadyErrorMsg, msg) } func TestGetSWIFTv2IPConfigSuccess(t *testing.T) { diff --git a/cns/middlewares/mock/mockClient.go b/cns/middlewares/mock/mockClient.go index 843f03111a..52ebd04225 100644 --- a/cns/middlewares/mock/mockClient.go +++ b/cns/middlewares/mock/mockClient.go @@ -7,6 +7,7 @@ import ( "github.com/Azure/azure-container-networking/crd/multitenancy/api/v1alpha1" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -66,31 +67,35 @@ func NewClient() *Client { testPod10.Labels = make(map[string]string) testPod10.Labels[configuration.LabelPodNetworkInstanceSwiftV2] = podNetwork + testPodMtpncTerminating := v1.Pod{} + testPodMtpncTerminating.Labels = 
make(map[string]string) + testPodMtpncTerminating.Labels[configuration.LabelPodSwiftV2] = podNetwork + testInterfaceInfos1 := v1alpha1.InterfaceInfo{ - NCID: "testncid", - PrimaryIP: "192.168.0.1/32", - MacAddress: "00:00:00:00:00:00", - GatewayIP: "10.0.0.1", - DeviceType: v1alpha1.DeviceTypeVnetNIC, - AccelnetEnabled: false, + NCID: "testncid", + PrimaryIP: "192.168.0.1/32", + MacAddress: "00:00:00:00:00:00", + GatewayIP: "10.0.0.1", + DeviceType: v1alpha1.DeviceTypeVnetNIC, + AccelnetEnabled: false, SubnetAddressSpace: "192.168.0.0/24", } testInterfaceInfos3 := v1alpha1.InterfaceInfo{ - NCID: "testncid", - PrimaryIP: "192.168.0.1/32", - MacAddress: "00:00:00:00:00:00", - GatewayIP: "10.0.0.1", - DeviceType: v1alpha1.DeviceTypeVnetNIC, - AccelnetEnabled: false, + NCID: "testncid", + PrimaryIP: "192.168.0.1/32", + MacAddress: "00:00:00:00:00:00", + GatewayIP: "10.0.0.1", + DeviceType: v1alpha1.DeviceTypeVnetNIC, + AccelnetEnabled: false, SubnetAddressSpace: "192.168.0.0/24", } testInterfaceInfos5 := v1alpha1.InterfaceInfo{ - NCID: "testncid", - PrimaryIP: "192.168.0.1/32", - MacAddress: "00:00:00:00:00:00", - GatewayIP: "10.0.0.1", - DeviceType: v1alpha1.DeviceTypeInfiniBandNIC, - AccelnetEnabled: true, + NCID: "testncid", + PrimaryIP: "192.168.0.1/32", + MacAddress: "00:00:00:00:00:00", + GatewayIP: "10.0.0.1", + DeviceType: v1alpha1.DeviceTypeInfiniBandNIC, + AccelnetEnabled: true, SubnetAddressSpace: "192.168.0.0/24", } @@ -166,28 +171,38 @@ func NewClient() *Client { Status: v1alpha1.MultitenantPodNetworkConfigStatus{}, } + testMTPNCTerminating := v1alpha1.MultitenantPodNetworkConfig{ + Status: v1alpha1.MultitenantPodNetworkConfigStatus{ + InterfaceInfos: []v1alpha1.InterfaceInfo{testInterfaceInfos1}, + }, + } + now := metav1.Now() + testMTPNCTerminating.DeletionTimestamp = &now + return &Client{ mtPodCache: map[string]*v1.Pod{ - "testpod1namespace/testpod1": &testPod1, - "testpod3namespace/testpod3": &testPod3, - "testpod4namespace/testpod4": &testPod4, - 
"testpod5namespace/testpod5": &testPod5, - "testpod6namespace/testpod6": &testPod6, - "testpod7namespace/testpod7": &testPod7, - "testpod8namespace/testpod8": &testPod8, - "testpod9namespace/testpod9": &testPod9, - "testpod10namespace/testpod10": &testPod10, + "testpod1namespace/testpod1": &testPod1, + "testpod3namespace/testpod3": &testPod3, + "testpod4namespace/testpod4": &testPod4, + "testpod5namespace/testpod5": &testPod5, + "testpod6namespace/testpod6": &testPod6, + "testpod7namespace/testpod7": &testPod7, + "testpod8namespace/testpod8": &testPod8, + "testpod9namespace/testpod9": &testPod9, + "testpod10namespace/testpod10": &testPod10, + "testpodMtpncTerminatingnamespace/testpodMtpncTerminating": &testPodMtpncTerminating, }, mtpncCache: map[string]*v1alpha1.MultitenantPodNetworkConfig{ - "testpod1namespace/testpod1": &testMTPNC1, - "testpod2namespace/testpod2": &testMTPNC2, - "testpod4namespace/testpod4": &testMTPNC4, - "testpod5namespace/testpod5": &testMTPNC3, - "testpod6namespace/testpod6": &testMTPNC5, - "testpod7namespace/testpod7": &testMTPNCMulti, - "testpod8namespace/testpod8": &testMTPNC8, - "testpod9namespace/testpod9": &testMTPNC9, - "testpod10namespace/testpod10": &testMTPNC10, + "testpod1namespace/testpod1": &testMTPNC1, + "testpod2namespace/testpod2": &testMTPNC2, + "testpod4namespace/testpod4": &testMTPNC4, + "testpod5namespace/testpod5": &testMTPNC3, + "testpod6namespace/testpod6": &testMTPNC5, + "testpod7namespace/testpod7": &testMTPNCMulti, + "testpod8namespace/testpod8": &testMTPNC8, + "testpod9namespace/testpod9": &testMTPNC9, + "testpod10namespace/testpod10": &testMTPNC10, + "testpodMtpncTerminatingnamespace/testpodMtpncTerminating": &testMTPNCTerminating, }, } } @@ -213,12 +228,12 @@ func (c *Client) Get(_ context.Context, key client.ObjectKey, obj client.Object, func (c *Client) SetMTPNCReady() { testInterfaceInfos1 := v1alpha1.InterfaceInfo{ - NCID: "testncid", - PrimaryIP: "192.168.0.1/32", - MacAddress: "00:00:00:00:00:00", - 
GatewayIP: "10.0.0.1", - DeviceType: v1alpha1.DeviceTypeVnetNIC, - AccelnetEnabled: false, + NCID: "testncid", + PrimaryIP: "192.168.0.1/32", + MacAddress: "00:00:00:00:00:00", + GatewayIP: "10.0.0.1", + DeviceType: v1alpha1.DeviceTypeVnetNIC, + AccelnetEnabled: false, SubnetAddressSpace: "192.168.0.0/24", } diff --git a/crd/multitenancy/api/v1alpha1/utils.go b/crd/multitenancy/api/v1alpha1/utils.go index 49bedd9358..0cd6990ec0 100644 --- a/crd/multitenancy/api/v1alpha1/utils.go +++ b/crd/multitenancy/api/v1alpha1/utils.go @@ -9,3 +9,9 @@ func (m *MultitenantPodNetworkConfig) IsReady() bool { // Check if InterfaceInfos slice is not empty return !reflect.DeepEqual(m.Status, MultitenantPodNetworkConfigStatus{}) } + +// IsDeleting returns true if the MultitenantPodNetworkConfig resource has been marked for deletion. +// A resource is considered to be deleting when its DeletionTimestamp field is set. +func (m *MultitenantPodNetworkConfig) IsDeleting() bool { + return !m.DeletionTimestamp.IsZero() +} From b3d654d9583cc28bec41ab72f7db7780f5dd18cf Mon Sep 17 00:00:00 2001 From: Paul Yu <129891899+paulyufan2@users.noreply.github.com> Date: Wed, 22 Oct 2025 21:35:13 -0400 Subject: [PATCH 13/47] Update GO security dependencies (#4096) * update go security dependencies * update retry dependency --------- Co-authored-by: paulyu --- go.mod | 20 ++++++++++---------- go.sum | 44 ++++++++++++++++++++++---------------------- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/go.mod b/go.mod index 15a89be24e..bf07d7f6ac 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/Microsoft/go-winio v0.6.2 github.com/Microsoft/hcsshim v0.13.0 github.com/avast/retry-go/v3 v3.1.1 - github.com/avast/retry-go/v4 v4.6.1 + github.com/avast/retry-go/v4 v4.7.0 github.com/billgraziano/dpapi v0.5.0 github.com/containernetworking/cni v1.3.0 github.com/evanphx/json-patch/v5 v5.9.11 // indirect @@ -36,7 +36,7 @@ require ( github.com/stretchr/testify v1.11.1 go.uber.org/zap 
v1.27.0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/sys v0.36.0 + golang.org/x/sys v0.37.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect google.golang.org/grpc v1.76.0 google.golang.org/protobuf v1.36.10 @@ -86,7 +86,7 @@ require ( github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/common v0.66.1 + github.com/prometheus/common v0.67.1 github.com/prometheus/procfs v0.16.1 // indirect github.com/sirupsen/logrus v1.9.3 github.com/spf13/afero v1.15.0 // indirect @@ -98,12 +98,12 @@ require ( github.com/vishvananda/netns v0.0.5 go.opencensus.io v0.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.42.0 - golang.org/x/net v0.45.0 - golang.org/x/oauth2 v0.31.0 // indirect - golang.org/x/term v0.35.0 // indirect - golang.org/x/text v0.29.0 // indirect - golang.org/x/time v0.13.0 + golang.org/x/crypto v0.43.0 + golang.org/x/net v0.46.0 + golang.org/x/oauth2 v0.32.0 // indirect + golang.org/x/term v0.36.0 // indirect + golang.org/x/text v0.30.0 // indirect + golang.org/x/time v0.14.0 golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect gopkg.in/fsnotify.v1 v1.4.7 // indirect @@ -175,7 +175,7 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect go.opentelemetry.io/otel/trace v1.38.0 // indirect go.uber.org/dig v1.17.1 // indirect - go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect diff --git a/go.sum b/go.sum index bfd0ad7ded..dbcced8ba9 100644 --- a/go.sum +++ b/go.sum @@ -55,8 +55,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 
h1:DklsrG3d github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/avast/retry-go/v3 v3.1.1 h1:49Scxf4v8PmiQ/nY0aY3p0hDueqSmc7++cBbtiDGu2g= github.com/avast/retry-go/v3 v3.1.1/go.mod h1:6cXRK369RpzFL3UQGqIUp9Q7GDrams+KsYWrfNA1/nQ= -github.com/avast/retry-go/v4 v4.6.1 h1:VkOLRubHdisGrHnTu89g08aQEWEgRU7LVEop3GbIcMk= -github.com/avast/retry-go/v4 v4.6.1/go.mod h1:V6oF8njAwxJ5gRo1Q7Cxab24xs5NCWZBeaHHBklR8mA= +github.com/avast/retry-go/v4 v4.7.0 h1:yjDs35SlGvKwRNSykujfjdMxMhMQQM0TnIjJaHB+Zio= +github.com/avast/retry-go/v4 v4.7.0/go.mod h1:ZMPDa3sY2bKgpLtap9JRUgk2yTAba7cgiFhqxY2Sg6Q= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/billgraziano/dpapi v0.5.0 h1:pcxA17vyjbDqYuxCFZbgL9tYIk2xgbRZjRaIbATwh+8= @@ -330,8 +330,8 @@ github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UH github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/common v0.67.1 h1:OTSON1P4DNxzTg4hmKCc37o4ZAZDv0cfXLkOt0oEowI= +github.com/prometheus/common v0.67.1/go.mod h1:RpmT9v35q2Y+lsieQsdOh5sXZ6ajUGC8NjZAmr8vb0Q= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= @@ -432,8 +432,8 @@ go.uber.org/multierr 
v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= @@ -443,8 +443,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= -golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= @@ -471,11 +471,11 @@ golang.org/x/net 
v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM= -golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo= -golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= +golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -508,22 +508,22 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.37.0 
h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= -golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= +golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= +golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= -golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= -golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= -golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -535,8 +535,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= -golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= +golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 3ddc447c758a492acabb50c31e566084cd63eefd Mon Sep 17 00:00:00 2001 From: shreyashastantram <105284415+shreyashastantram@users.noreply.github.com> Date: Fri, 24 Oct 2025 07:03:08 -0700 Subject: [PATCH 14/47] Expanding mtpnc status to include delete status (#4085) * Adding delete timestamp check * Adding status and removing deprecated fields from output * Revert MTPNC deletion timestamp check * fix: removing unused fields * fix: removing unused fields * Adding documentation for the status * Formatting the config file * Addressing PR comments * Changing type for pod uid --- .../v1alpha1/multitenantpodnetworkconfig.go | 29 +++++++++++++------ crd/multitenancy/api/v1alpha1/podnetwork.go | 2 +- .../api/v1alpha1/podnetworkinstance.go | 1 - ...zure.com_multitenantpodnetworkconfigs.yaml | 10 +++++-- ...ncy.acn.azure.com_podnetworkinstances.yaml | 4 --- 
...ultitenancy.acn.azure.com_podnetworks.yaml | 2 +- 6 files changed, 29 insertions(+), 19 deletions(-) diff --git a/crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go b/crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go index 099e510419..198b990415 100644 --- a/crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go +++ b/crd/multitenancy/api/v1alpha1/multitenantpodnetworkconfig.go @@ -5,6 +5,7 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ) // Important: Run "make" to regenerate code after modifying this file @@ -17,8 +18,9 @@ import ( // +kubebuilder:metadata:labels=managed= // +kubebuilder:metadata:labels=owner= // +kubebuilder:printcolumn:name="PodNetworkInstance",type=string,JSONPath=`.spec.podNetworkInstance` -// +kubebuilder:printcolumn:name="PodNetwork",type=string,JSONPath=`.spec.podNetwork` // +kubebuilder:printcolumn:name="PodName",type=string,JSONPath=`.spec.podName` +// +kubebuilder:printcolumn:name="PodUID",type=string,JSONPath=`.spec.podUID` +// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.status` type MultitenantPodNetworkConfig struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -49,7 +51,7 @@ type MultitenantPodNetworkConfigSpec struct { // +kubebuilder:validation:Optional IBMACAddresses []string `json:"IBMACAddresses,omitempty"` // PodUID is the UID of the pod - PodUID string `json:"podUID,omitempty"` + PodUID types.UID `json:"podUID,omitempty"` } // +kubebuilder:validation:Enum=Unprogrammed;Programming;Programmed;Unprogramming;Failed @@ -64,17 +66,26 @@ const ( ) // MTPNCStatus indicates the high-level status of MultitenantPodNetworkConfig -// +kubebuilder:validation:Enum=Ready;Pending;InternalError;PNINotFound;PNINotReady;NodeCapacityExceeded;IPsExhausted +// +kubebuilder:validation:Enum=Ready;Pending;InternalError;PNINotFound;PNINotReady;NodeCapacityExceeded;IPsExhausted;Deleting type 
MTPNCStatus string const ( - MTPNCStatusReady MTPNCStatus = "Ready" - MTPNCStatusPending MTPNCStatus = "Pending" - MTPNCStatusInternalError MTPNCStatus = "InternalError" - MTPNCStatusPNINotFound MTPNCStatus = "PNINotFound" - MTPNCStatusPNINotReady MTPNCStatus = "PNINotReady" + // MTPNCStatusReady indicates the MTPNC has been successfully programmed and is ready for use + MTPNCStatusReady MTPNCStatus = "Ready" + // MTPNCStatusPending indicates the MTPNC is awaiting processing + MTPNCStatusPending MTPNCStatus = "Pending" + // MTPNCStatusInternalError indicates an internal error occurred while processing the MTPNC + MTPNCStatusInternalError MTPNCStatus = "InternalError" + // MTPNCStatusPNINotFound indicates the referenced PodNetworkInstance was not found + MTPNCStatusPNINotFound MTPNCStatus = "PNINotFound" + // MTPNCStatusPNINotReady indicates the referenced PodNetworkInstance is not yet ready + MTPNCStatusPNINotReady MTPNCStatus = "PNINotReady" + // MTPNCStatusNodeCapacityExceeded indicates the node has exceeded its capacity for network resources MTPNCStatusNodeCapacityExceeded MTPNCStatus = "NodeCapacityExceeded" - MTPNCStatusIPsExhausted MTPNCStatus = "IPsExhausted" + // MTPNCStatusIPsExhausted indicates no IP addresses are available for allocation + MTPNCStatusIPsExhausted MTPNCStatus = "IPsExhausted" + // MTPNCStatusDeleting indicates MTPNC is being deleted, status may not be set at the same time as deletionTimestamp. 
+ MTPNCStatusDeleting MTPNCStatus = "Deleting" ) type InterfaceInfo struct { diff --git a/crd/multitenancy/api/v1alpha1/podnetwork.go b/crd/multitenancy/api/v1alpha1/podnetwork.go index 4f21ccbe07..f2b23c7670 100644 --- a/crd/multitenancy/api/v1alpha1/podnetwork.go +++ b/crd/multitenancy/api/v1alpha1/podnetwork.go @@ -19,7 +19,7 @@ import ( // +kubebuilder:printcolumn:name="Network",type=string,priority=1,JSONPath=`.spec.networkID` // +kubebuilder:printcolumn:name="Subnet",type=string,priority=1,JSONPath=`.spec.subnetResourceID` // +kubebuilder:printcolumn:name="SubnetGUID",type=string,priority=1,JSONPath=`.spec.subnetGUID` -// +kubebuilder:printcolumn:name="DeviceType",type=string,priority=1,JSONPath=`.spec.subnetGUID` +// +kubebuilder:printcolumn:name="DeviceType",type=string,priority=1,JSONPath=`.spec.deviceType` type PodNetwork struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/crd/multitenancy/api/v1alpha1/podnetworkinstance.go b/crd/multitenancy/api/v1alpha1/podnetworkinstance.go index 0437bee57f..3f78dd58e5 100644 --- a/crd/multitenancy/api/v1alpha1/podnetworkinstance.go +++ b/crd/multitenancy/api/v1alpha1/podnetworkinstance.go @@ -17,7 +17,6 @@ import ( // +kubebuilder:metadata:labels=managed= // +kubebuilder:metadata:labels=owner= // +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.status` -// +kubebuilder:printcolumn:name="PodNetworks",priority=1,type=string,JSONPath=`.spec.podNetworks` type PodNetworkInstance struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml b/crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml index e539ed4b24..db8c1de9ba 100644 --- a/crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml +++ 
b/crd/multitenancy/manifests/multitenancy.acn.azure.com_multitenantpodnetworkconfigs.yaml @@ -23,12 +23,15 @@ spec: - jsonPath: .spec.podNetworkInstance name: PodNetworkInstance type: string - - jsonPath: .spec.podNetwork - name: PodNetwork - type: string - jsonPath: .spec.podName name: PodName type: string + - jsonPath: .spec.podUID + name: PodUID + type: string + - jsonPath: .status.status + name: Status + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -154,6 +157,7 @@ spec: - PNINotReady - NodeCapacityExceeded - IPsExhausted + - Deleting type: string type: object type: object diff --git a/crd/multitenancy/manifests/multitenancy.acn.azure.com_podnetworkinstances.yaml b/crd/multitenancy/manifests/multitenancy.acn.azure.com_podnetworkinstances.yaml index 3f13488dbe..432cb5a222 100644 --- a/crd/multitenancy/manifests/multitenancy.acn.azure.com_podnetworkinstances.yaml +++ b/crd/multitenancy/manifests/multitenancy.acn.azure.com_podnetworkinstances.yaml @@ -23,10 +23,6 @@ spec: - jsonPath: .status.status name: Status type: string - - jsonPath: .spec.podNetworks - name: PodNetworks - priority: 1 - type: string name: v1alpha1 schema: openAPIV3Schema: diff --git a/crd/multitenancy/manifests/multitenancy.acn.azure.com_podnetworks.yaml b/crd/multitenancy/manifests/multitenancy.acn.azure.com_podnetworks.yaml index 90ed5b698f..173174e771 100644 --- a/crd/multitenancy/manifests/multitenancy.acn.azure.com_podnetworks.yaml +++ b/crd/multitenancy/manifests/multitenancy.acn.azure.com_podnetworks.yaml @@ -37,7 +37,7 @@ spec: name: SubnetGUID priority: 1 type: string - - jsonPath: .spec.subnetGUID + - jsonPath: .spec.deviceType name: DeviceType priority: 1 type: string From 4e742b10f817c0620879e36cf9cd463c29834bb7 Mon Sep 17 00:00:00 2001 From: rejain789 Date: Wed, 5 Nov 2025 09:42:25 -0800 Subject: [PATCH 15/47] [CNS] Overlay Expansion Subnet Update Job Bug Fix (#4103) hot fix --- cns/restserver/internalapi.go | 4 ++- cns/restserver/internalapi_test.go | 46 
++++++++++++++++-------------- 2 files changed, 28 insertions(+), 22 deletions(-) diff --git a/cns/restserver/internalapi.go b/cns/restserver/internalapi.go index de52c3e050..9f855ec64f 100644 --- a/cns/restserver/internalapi.go +++ b/cns/restserver/internalapi.go @@ -632,7 +632,9 @@ func (service *HTTPRestService) CreateOrUpdateNetworkContainerInternal(req *cns. existingReq := existingNCInfo.CreateNetworkContainerRequest if !reflect.DeepEqual(existingReq.IPConfiguration.IPSubnet, req.IPConfiguration.IPSubnet) { // check for potential overlay subnet expansion - checking if new subnet is a superset of old subnet - isCIDRSuperset := validateCIDRSuperset(req.IPConfiguration.IPSubnet.IPAddress, existingReq.IPConfiguration.IPSubnet.IPAddress) + isCIDRSuperset := validateCIDRSuperset( + fmt.Sprintf("%s/%d", req.IPConfiguration.IPSubnet.IPAddress, req.IPConfiguration.IPSubnet.PrefixLength), + fmt.Sprintf("%s/%d", existingReq.IPConfiguration.IPSubnet.IPAddress, existingReq.IPConfiguration.IPSubnet.PrefixLength)) if !isCIDRSuperset { logger.Errorf("[Azure CNS] Error. 
PrimaryCA is not same, NCId %s, old CA %s/%d, new CA %s/%d", //nolint:staticcheck // Suppress SA1019: logger.Errorf is deprecated req.NetworkContainerid, diff --git a/cns/restserver/internalapi_test.go b/cns/restserver/internalapi_test.go index 81ccc85154..440e3b4e61 100644 --- a/cns/restserver/internalapi_test.go +++ b/cns/restserver/internalapi_test.go @@ -67,16 +67,16 @@ func TestReconcileNCStatePrimaryIPChangeShouldFail(t *testing.T) { svc.state.ContainerStatus = make(map[string]containerstatus) testCases := []struct { - existingIPAddress string - requestIPAddress string + reqIPAddress string + reqPrefixLength uint8 + existingIPAddress string + existingPrefixLength uint8 }{ - {"", "10.240.0.0/16"}, - {"10.240.0.0", "2001:db8::/64"}, - {"2001:db8::/64", "10.240.0.0/16"}, - {"10.0.1.0/22", "10.0.2.0/24"}, - {"10.0.1.0/21", "10.0.1.0/23"}, - {"10.0.1.0", "10.0.0.0/15"}, - {"10.0.1.0/15", "10.0.0.0"}, + {"10.240.1.0", 16, "10.240.0.0", 16}, + {"10.240.0.0", 64, "2001:db8::", 64}, + {"2001:db8::", 64, "10.240.0.0", 16}, + {"10.0.1.0", 24, "10.0.2.0", 22}, + {"10.0.1.0", 23, "10.0.1.0", 21}, } // Run test cases @@ -92,7 +92,7 @@ func TestReconcileNCStatePrimaryIPChangeShouldFail(t *testing.T) { IPConfiguration: cns.IPConfiguration{ IPSubnet: cns.IPSubnet{ IPAddress: tc.existingIPAddress, - PrefixLength: 24, + PrefixLength: tc.existingPrefixLength, }, }, }, @@ -103,8 +103,8 @@ func TestReconcileNCStatePrimaryIPChangeShouldFail(t *testing.T) { NetworkContainerid: ncID, IPConfiguration: cns.IPConfiguration{ IPSubnet: cns.IPSubnet{ - IPAddress: tc.requestIPAddress, - PrefixLength: 24, + IPAddress: tc.reqIPAddress, + PrefixLength: tc.reqPrefixLength, }, }, }, @@ -127,13 +127,17 @@ func TestReconcileNCStatePrimaryIPChangeShouldNotFail(t *testing.T) { svc.state.ContainerStatus = make(map[string]containerstatus) testCases := []struct { - existingIPAddress string - requestIPAddress string + reqIPAddress string + reqPrefixLength uint8 + existingIPAddress string + 
existingPrefixLength uint8 }{ - {"10.0.1.0/24", "10.0.2.0/22"}, - {"10.0.1.0/20", "10.0.1.0/18"}, - {"10.0.1.0/19", "10.0.0.0/15"}, - {"10.0.1.0/18", "10.0.1.0/18"}, + {"10.240.0.0", 20, "10.240.0.0", 24}, + + {"10.0.1.0", 22, "10.0.2.0", 24}, + {"10.0.1.0", 18, "10.0.1.0", 20}, + {"10.0.1.0", 15, "10.0.0.0", 19}, + {"10.0.1.0", 18, "10.0.1.0", 18}, } // Run test cases @@ -149,7 +153,7 @@ func TestReconcileNCStatePrimaryIPChangeShouldNotFail(t *testing.T) { IPConfiguration: cns.IPConfiguration{ IPSubnet: cns.IPSubnet{ IPAddress: tc.existingIPAddress, - PrefixLength: 24, + PrefixLength: tc.existingPrefixLength, }, }, }, @@ -160,8 +164,8 @@ func TestReconcileNCStatePrimaryIPChangeShouldNotFail(t *testing.T) { NetworkContainerid: ncID, IPConfiguration: cns.IPConfiguration{ IPSubnet: cns.IPSubnet{ - IPAddress: tc.requestIPAddress, - PrefixLength: 24, + IPAddress: tc.reqIPAddress, + PrefixLength: tc.reqPrefixLength, }, }, NetworkContainerType: cns.Kubernetes, From 32928f930fc72f121173a7aa5b0c9b5fe4d26e71 Mon Sep 17 00:00:00 2001 From: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> Date: Wed, 5 Nov 2025 11:30:44 -0800 Subject: [PATCH 16/47] Swiftv2 Long running cluster - test pipeline (#4099) * init swiftv2 pipeline for persistent tests on aks clusters. * Set default params. * Update pipeline.yaml for Azure Pipelines * long running pipeline infra setup. * Set depedencies for pipeline jobs. * template for long running cluster. * set template. * set dependency for jobs. * Change job name. * Set job scripts. * set pipeline scripts with permissions. * set script path. * set template params. * Set pipeline template for long running clusters. * test change. * set params. * set params in pipeline scripts. * set cx vnet name. * Create clusters parallely * create NSG. * Change dependency for creating nsg. 
* Update .pipelines/swiftv2-long-running/scripts/create_peerings.sh Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> * Update .pipelines/swiftv2-long-running/scripts/create_nsg.sh Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> * Add success/error message for each resource creation. * Remove unused argument from template. * Rename subnets. Changed NSG rules to prevent network connectivity between vnet 1 subnet 1 and vnet 1 subnet2. * Private endpoints. * Change pipeline template. * Set output variables. * private endpoint. * update private endpoint. * create storage account. * disallow shared key access. * change pipeline template. * Removed unused param. * Link private endpoint dns to vnet a2 and vnet a3. * attach nsg rule to subnets. * Link nsg with subnet. * Private endpoint fix - long running pipeline. * Verify each resource creation - long running cluster test pipeline. * verify storage account creation. * use make tragets to create aks clusters. * misc. * set aks custom headers. * Use aks common field in swiftv2-podsubnet-cluster creation. 
--------- Signed-off-by: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> Co-authored-by: sivakami Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .pipelines/swiftv2-long-running/pipeline.yaml | 42 ++++++ .../scripts/create_aks.sh | 63 ++++++++ .../scripts/create_nsg.sh | 109 ++++++++++++++ .../swiftv2-long-running/scripts/create_pe.sh | 87 +++++++++++ .../scripts/create_peerings.sh | 38 +++++ .../scripts/create_storage.sh | 47 ++++++ .../scripts/create_vnets.sh | 84 +++++++++++ .../long-running-pipeline-template.yaml | 140 ++++++++++++++++++ hack/aks/Makefile | 26 ++++ 9 files changed, 636 insertions(+) create mode 100644 .pipelines/swiftv2-long-running/pipeline.yaml create mode 100644 .pipelines/swiftv2-long-running/scripts/create_aks.sh create mode 100644 .pipelines/swiftv2-long-running/scripts/create_nsg.sh create mode 100644 .pipelines/swiftv2-long-running/scripts/create_pe.sh create mode 100644 .pipelines/swiftv2-long-running/scripts/create_peerings.sh create mode 100644 .pipelines/swiftv2-long-running/scripts/create_storage.sh create mode 100644 .pipelines/swiftv2-long-running/scripts/create_vnets.sh create mode 100644 .pipelines/swiftv2-long-running/template/long-running-pipeline-template.yaml diff --git a/.pipelines/swiftv2-long-running/pipeline.yaml b/.pipelines/swiftv2-long-running/pipeline.yaml new file mode 100644 index 0000000000..b6d085901d --- /dev/null +++ b/.pipelines/swiftv2-long-running/pipeline.yaml @@ -0,0 +1,42 @@ +trigger: none + +parameters: + - name: subscriptionId + displayName: "Azure Subscription ID" + type: string + default: "37deca37-c375-4a14-b90a-043849bd2bf1" + + - name: location + displayName: "Deployment Region" + type: string + default: "centraluseuap" + + - name: resourceGroupName + displayName: "Resource Group Name" + type: string + default: "long-run-$(Build.BuildId)" + + - name: vmSkuDefault + displayName: "VM SKU for Default Node Pool" + type: string + default: 
"Standard_D2s_v3" + + - name: vmSkuHighNIC + displayName: "VM SKU for High NIC Node Pool" + type: string + default: "Standard_D16s_v3" + + - name: serviceConnection + displayName: "Azure Service Connection" + type: string + default: "Azure Container Networking - Standalone Test Service Connection" + +extends: + template: template/long-running-pipeline-template.yaml + parameters: + subscriptionId: ${{ parameters.subscriptionId }} + location: ${{ parameters.location }} + resourceGroupName: ${{ parameters.resourceGroupName }} + vmSkuDefault: ${{ parameters.vmSkuDefault }} + vmSkuHighNIC: ${{ parameters.vmSkuHighNIC }} + serviceConnection: ${{ parameters.serviceConnection }} diff --git a/.pipelines/swiftv2-long-running/scripts/create_aks.sh b/.pipelines/swiftv2-long-running/scripts/create_aks.sh new file mode 100644 index 0000000000..4ab38c0f42 --- /dev/null +++ b/.pipelines/swiftv2-long-running/scripts/create_aks.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +set -euo pipefail +trap 'echo "[ERROR] Failed during Resource group or AKS cluster creation." >&2' ERR +SUBSCRIPTION_ID=$1 +LOCATION=$2 +RG=$3 +VM_SKU_DEFAULT=$4 +VM_SKU_HIGHNIC=$5 + +CLUSTER_COUNT=2 +CLUSTER_PREFIX="aks" +DEFAULT_NODE_COUNT=1 +COMMON_TAGS="fastpathenabled=true RGOwner=LongRunningTestPipelines stampcreatorserviceinfo=true" + +wait_for_provisioning() { # Helper for safe retry/wait for provisioning states (basic) + local rg="$1" clusterName="$2" + echo "Waiting for AKS '$clusterName' in RG '$rg' to reach Succeeded/Failed (polling)..." 
+ while :; do + state=$(az aks show --resource-group "$rg" --name "$clusterName" --query provisioningState -o tsv 2>/dev/null || true) + if [ -z "$state" ]; then + sleep 3 + continue + fi + case "$state" in + Succeeded|Succeeded*) echo "Provisioning state: $state"; break ;; + Failed|Canceled|Rejected) echo "Provisioning finished with state: $state"; break ;; + *) printf "."; sleep 6 ;; + esac + done +} + + +for i in $(seq 1 "$CLUSTER_COUNT"); do + echo "==============================" + echo " Working on cluster set #$i" + echo "==============================" + + CLUSTER_NAME="${CLUSTER_PREFIX}-${i}" + echo "Creating AKS cluster '$CLUSTER_NAME' in RG '$RG'" + + make -C ./hack/aks azcfg AZCLI=az REGION=$LOCATION + + make -C ./hack/aks swiftv2-podsubnet-cluster-up \ + AZCLI=az REGION=$LOCATION \ + SUB=$SUBSCRIPTION_ID \ + GROUP=$RG \ + CLUSTER=$CLUSTER_NAME \ + NODE_COUNT=$DEFAULT_NODE_COUNT \ + VM_SIZE=$VM_SKU_DEFAULT \ + + echo " - waiting for AKS provisioning state..." + wait_for_provisioning "$RG" "$CLUSTER_NAME" + + echo "Adding multi-tenant nodepool ' to '$CLUSTER_NAME'" + make -C ./hack/aks linux-swiftv2-nodepool-up \ + AZCLI=az REGION=$LOCATION \ + GROUP=$RG \ + VM_SIZE=$VM_SKU_HIGHNIC \ + CLUSTER=$CLUSTER_NAME \ + SUB=$SUBSCRIPTION_ID \ + +done +echo "All done. Created $CLUSTER_COUNT cluster set(s)." diff --git a/.pipelines/swiftv2-long-running/scripts/create_nsg.sh b/.pipelines/swiftv2-long-running/scripts/create_nsg.sh new file mode 100644 index 0000000000..cec91cd7cf --- /dev/null +++ b/.pipelines/swiftv2-long-running/scripts/create_nsg.sh @@ -0,0 +1,109 @@ +#!/usr/bin/env bash +set -e +trap 'echo "[ERROR] Failed during NSG creation or rule setup." 
>&2' ERR + +SUBSCRIPTION_ID=$1 +RG=$2 +LOCATION=$3 + +VNET_A1="cx_vnet_a1" +SUBNET1_PREFIX="10.10.1.0/24" +SUBNET2_PREFIX="10.10.2.0/24" +NSG_NAME="${VNET_A1}-nsg" + +verify_nsg() { + local rg="$1"; local name="$2" + echo "==> Verifying NSG: $name" + if az network nsg show -g "$rg" -n "$name" &>/dev/null; then + echo "[OK] Verified NSG $name exists." + else + echo "[ERROR] NSG $name not found!" >&2 + exit 1 + fi +} + +verify_nsg_rule() { + local rg="$1"; local nsg="$2"; local rule="$3" + echo "==> Verifying NSG rule: $rule in $nsg" + if az network nsg rule show -g "$rg" --nsg-name "$nsg" -n "$rule" &>/dev/null; then + echo "[OK] Verified NSG rule $rule exists in $nsg." + else + echo "[ERROR] NSG rule $rule not found in $nsg!" >&2 + exit 1 + fi +} + +verify_subnet_nsg_association() { + local rg="$1"; local vnet="$2"; local subnet="$3"; local nsg="$4" + echo "==> Verifying NSG association on subnet $subnet..." + local associated_nsg + associated_nsg=$(az network vnet subnet show -g "$rg" --vnet-name "$vnet" -n "$subnet" --query "networkSecurityGroup.id" -o tsv 2>/dev/null || echo "") + if [[ "$associated_nsg" == *"$nsg"* ]]; then + echo "[OK] Verified subnet $subnet is associated with NSG $nsg." + else + echo "[ERROR] Subnet $subnet is NOT associated with NSG $nsg!" >&2 + exit 1 + fi +} + +# ------------------------------- +# 1. Create NSG +# ------------------------------- +echo "==> Creating Network Security Group: $NSG_NAME" +az network nsg create -g "$RG" -n "$NSG_NAME" -l "$LOCATION" --output none \ + && echo "[OK] NSG '$NSG_NAME' created." +verify_nsg "$RG" "$NSG_NAME" + +# ------------------------------- +# 2. 
Create NSG Rules +# ------------------------------- +echo "==> Creating NSG rule to DENY traffic from Subnet1 ($SUBNET1_PREFIX) to Subnet2 ($SUBNET2_PREFIX)" +az network nsg rule create \ + --resource-group "$RG" \ + --nsg-name "$NSG_NAME" \ + --name deny-subnet1-to-subnet2 \ + --priority 100 \ + --source-address-prefixes "$SUBNET1_PREFIX" \ + --destination-address-prefixes "$SUBNET2_PREFIX" \ + --direction Inbound \ + --access Deny \ + --protocol "*" \ + --description "Deny all traffic from Subnet1 to Subnet2" \ + --output none \ + && echo "[OK] Deny rule from Subnet1 → Subnet2 created." + +verify_nsg_rule "$RG" "$NSG_NAME" "deny-subnet1-to-subnet2" + +echo "==> Creating NSG rule to DENY traffic from Subnet2 ($SUBNET2_PREFIX) to Subnet1 ($SUBNET1_PREFIX)" +az network nsg rule create \ + --resource-group "$RG" \ + --nsg-name "$NSG_NAME" \ + --name deny-subnet2-to-subnet1 \ + --priority 200 \ + --source-address-prefixes "$SUBNET2_PREFIX" \ + --destination-address-prefixes "$SUBNET1_PREFIX" \ + --direction Inbound \ + --access Deny \ + --protocol "*" \ + --description "Deny all traffic from Subnet2 to Subnet1" \ + --output none \ + && echo "[OK] Deny rule from Subnet2 → Subnet1 created." + +verify_nsg_rule "$RG" "$NSG_NAME" "deny-subnet2-to-subnet1" + +# ------------------------------- +# 3. Associate NSG with Subnets +# ------------------------------- +for SUBNET in s1 s2; do + echo "==> Associating NSG $NSG_NAME with subnet $SUBNET" + az network vnet subnet update \ + --name "$SUBNET" \ + --vnet-name "$VNET_A1" \ + --resource-group "$RG" \ + --network-security-group "$NSG_NAME" \ + --output none + verify_subnet_nsg_association "$RG" "$VNET_A1" "$SUBNET" "$NSG_NAME" +done + +echo "NSG '$NSG_NAME' created successfully with bidirectional isolation between Subnet1 and Subnet2." 
+ diff --git a/.pipelines/swiftv2-long-running/scripts/create_pe.sh b/.pipelines/swiftv2-long-running/scripts/create_pe.sh new file mode 100644 index 0000000000..c9f7e782e0 --- /dev/null +++ b/.pipelines/swiftv2-long-running/scripts/create_pe.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash +set -e +trap 'echo "[ERROR] Failed during Private Endpoint or DNS setup." >&2' ERR + +SUBSCRIPTION_ID=$1 +LOCATION=$2 +RG=$3 +SA1_NAME=$4 # Storage account 1 + +VNET_A1="cx_vnet_a1" +VNET_A2="cx_vnet_a2" +VNET_A3="cx_vnet_a3" +SUBNET_PE_A1="pe" +PE_NAME="${SA1_NAME}-pe" +PRIVATE_DNS_ZONE="privatelink.blob.core.windows.net" + +# ------------------------------- +# Function: Verify Resource Exists +# ------------------------------- +verify_dns_zone() { + local rg="$1"; local zone="$2" + echo "==> Verifying Private DNS zone: $zone" + if az network private-dns zone show -g "$rg" -n "$zone" &>/dev/null; then + echo "[OK] Verified DNS zone $zone exists." + else + echo "[ERROR] DNS zone $zone not found!" >&2 + exit 1 + fi +} + +verify_dns_link() { + local rg="$1"; local zone="$2"; local link="$3" + echo "==> Verifying DNS link: $link for zone $zone" + if az network private-dns link vnet show -g "$rg" --zone-name "$zone" -n "$link" &>/dev/null; then + echo "[OK] Verified DNS link $link exists." + else + echo "[ERROR] DNS link $link not found!" >&2 + exit 1 + fi +} + +verify_private_endpoint() { + local rg="$1"; local name="$2" + echo "==> Verifying Private Endpoint: $name" + if az network private-endpoint show -g "$rg" -n "$name" &>/dev/null; then + echo "[OK] Verified Private Endpoint $name exists." + else + echo "[ERROR] Private Endpoint $name not found!" >&2 + exit 1 + fi +} + +# 1. Create Private DNS zone +echo "==> Creating Private DNS zone: $PRIVATE_DNS_ZONE" +az network private-dns zone create -g "$RG" -n "$PRIVATE_DNS_ZONE" --output none \ + && echo "[OK] DNS zone $PRIVATE_DNS_ZONE created." + +verify_dns_zone "$RG" "$PRIVATE_DNS_ZONE" + +# 2. 
Link DNS zone to VNet +for VNET in "$VNET_A1" "$VNET_A2" "$VNET_A3"; do + LINK_NAME="${VNET}-link" + echo "==> Linking DNS zone $PRIVATE_DNS_ZONE to VNet $VNET" + az network private-dns link vnet create \ + -g "$RG" -n "$LINK_NAME" \ + --zone-name "$PRIVATE_DNS_ZONE" \ + --virtual-network "$VNET" \ + --registration-enabled false \ + --output none \ + && echo "[OK] Linked DNS zone to $VNET." + verify_dns_link "$RG" "$PRIVATE_DNS_ZONE" "$LINK_NAME" +done + +# 3. Create Private Endpoint +echo "==> Creating Private Endpoint for Storage Account: $SA1_NAME" +SA1_ID=$(az storage account show -g "$RG" -n "$SA1_NAME" --query id -o tsv) +az network private-endpoint create \ + -g "$RG" -n "$PE_NAME" -l "$LOCATION" \ + --vnet-name "$VNET_A1" --subnet "$SUBNET_PE_A1" \ + --private-connection-resource-id "$SA1_ID" \ + --group-id blob \ + --connection-name "${PE_NAME}-conn" \ + --output none \ + && echo "[OK] Private Endpoint $PE_NAME created for $SA1_NAME." +verify_private_endpoint "$RG" "$PE_NAME" + +echo "All Private DNS and Endpoint resources created and verified successfully." diff --git a/.pipelines/swiftv2-long-running/scripts/create_peerings.sh b/.pipelines/swiftv2-long-running/scripts/create_peerings.sh new file mode 100644 index 0000000000..d6655492f1 --- /dev/null +++ b/.pipelines/swiftv2-long-running/scripts/create_peerings.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +set -e +trap 'echo "[ERROR] Failed during VNet peering creation." >&2' ERR + +RG=$1 +VNET_A1="cx_vnet_a1" +VNET_A2="cx_vnet_a2" +VNET_A3="cx_vnet_a3" +VNET_B1="cx_vnet_b1" + +verify_peering() { + local rg="$1"; local vnet="$2"; local peering="$3" + echo "==> Verifying peering $peering on $vnet..." + if az network vnet peering show -g "$rg" --vnet-name "$vnet" -n "$peering" --query "peeringState" -o tsv | grep -q "Connected"; then + echo "[OK] Peering $peering on $vnet is Connected." + else + echo "[ERROR] Peering $peering on $vnet not found or not Connected!" 
>&2 + exit 1 + fi +} + +peer_two_vnets() { + local rg="$1"; local v1="$2"; local v2="$3"; local name12="$4"; local name21="$5" + echo "==> Peering $v1 <-> $v2" + az network vnet peering create -g "$rg" -n "$name12" --vnet-name "$v1" --remote-vnet "$v2" --allow-vnet-access --output none \ + && echo "Created peering $name12" + az network vnet peering create -g "$rg" -n "$name21" --vnet-name "$v2" --remote-vnet "$v1" --allow-vnet-access --output none \ + && echo "Created peering $name21" + + # Verify both peerings are active + verify_peering "$rg" "$v1" "$name12" + verify_peering "$rg" "$v2" "$name21" +} + +peer_two_vnets "$RG" "$VNET_A1" "$VNET_A2" "A1-to-A2" "A2-to-A1" +peer_two_vnets "$RG" "$VNET_A2" "$VNET_A3" "A2-to-A3" "A3-to-A2" +peer_two_vnets "$RG" "$VNET_A1" "$VNET_A3" "A1-to-A3" "A3-to-A1" +echo "All VNet peerings created and verified successfully." diff --git a/.pipelines/swiftv2-long-running/scripts/create_storage.sh b/.pipelines/swiftv2-long-running/scripts/create_storage.sh new file mode 100644 index 0000000000..caefc69294 --- /dev/null +++ b/.pipelines/swiftv2-long-running/scripts/create_storage.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash +set -e +trap 'echo "[ERROR] Failed during Storage Account creation." >&2' ERR + +SUBSCRIPTION_ID=$1 +LOCATION=$2 +RG=$3 + +RAND=$(openssl rand -hex 4) +SA1="sa1${RAND}" +SA2="sa2${RAND}" + +# Set subscription context +az account set --subscription "$SUBSCRIPTION_ID" + +# Create storage accounts +for SA in "$SA1" "$SA2"; do + echo "==> Creating storage account $SA" + az storage account create \ + --name "$SA" \ + --resource-group "$RG" \ + --location "$LOCATION" \ + --sku Standard_LRS \ + --kind StorageV2 \ + --allow-blob-public-access false \ + --allow-shared-key-access false \ + --https-only true \ + --min-tls-version TLS1_2 \ + --query "name" -o tsv \ + && echo "Storage account $SA created successfully." + # Verify creation success + echo "==> Verifying storage account $SA exists..." 
+ if az storage account show --name "$SA" --resource-group "$RG" &>/dev/null; then + echo "[OK] Storage account $SA verified successfully." + else + echo "[ERROR] Storage account $SA not found after creation!" >&2 + exit 1 + fi +done + +echo "All storage accounts created and verified successfully." + +# Set pipeline output variables +set +x +echo "##vso[task.setvariable variable=StorageAccount1;isOutput=true]$SA1" +echo "##vso[task.setvariable variable=StorageAccount2;isOutput=true]$SA2" +set -x diff --git a/.pipelines/swiftv2-long-running/scripts/create_vnets.sh b/.pipelines/swiftv2-long-running/scripts/create_vnets.sh new file mode 100644 index 0000000000..eb894d06ff --- /dev/null +++ b/.pipelines/swiftv2-long-running/scripts/create_vnets.sh @@ -0,0 +1,84 @@ +#!/usr/bin/env bash +set -e +trap 'echo "[ERROR] Failed while creating VNets or subnets. Check Azure CLI logs above." >&2' ERR + +SUBSCRIPTION_ID=$1 +LOCATION=$2 +RG=$3 + +az account set --subscription "$SUBSCRIPTION_ID" + +# VNets and subnets +VNET_A1="cx_vnet_a1" +VNET_A2="cx_vnet_a2" +VNET_A3="cx_vnet_a3" +VNET_B1="cx_vnet_b1" + +A1_S1="10.10.1.0/24" +A1_S2="10.10.2.0/24" +A1_PE="10.10.100.0/24" + +A2_MAIN="10.11.1.0/24" + +A3_MAIN="10.12.1.0/24" + +B1_MAIN="10.20.1.0/24" + +# ------------------------------- +# Verification functions +# ------------------------------- +verify_vnet() { + local rg="$1"; local vnet="$2" + echo "==> Verifying VNet: $vnet" + if az network vnet show -g "$rg" -n "$vnet" &>/dev/null; then + echo "[OK] Verified VNet $vnet exists." + else + echo "[ERROR] VNet $vnet not found!" >&2 + exit 1 + fi +} + +verify_subnet() { + local rg="$1"; local vnet="$2"; local subnet="$3" + echo "==> Verifying subnet: $subnet in $vnet" + if az network vnet subnet show -g "$rg" --vnet-name "$vnet" -n "$subnet" &>/dev/null; then + echo "[OK] Verified subnet $subnet exists in $vnet." + else + echo "[ERROR] Subnet $subnet not found in $vnet!" 
>&2 + exit 1 + fi +} + +# ------------------------------- +# Create VNets and Subnets +# ------------------------------- +# A1 +az network vnet create -g "$RG" -n "$VNET_A1" --address-prefix 10.10.0.0/16 --subnet-name s1 --subnet-prefix "$A1_S1" -l "$LOCATION" --output none \ + && echo "Created $VNET_A1 with subnet s1" +az network vnet subnet create -g "$RG" --vnet-name "$VNET_A1" -n s2 --address-prefix "$A1_S2" --output none \ + && echo "Created $VNET_A1 with subnet s2" +az network vnet subnet create -g "$RG" --vnet-name "$VNET_A1" -n pe --address-prefix "$A1_PE" --output none \ + && echo "Created $VNET_A1 with subnet pe" +# Verify A1 +verify_vnet "$RG" "$VNET_A1" +for sn in s1 s2 pe; do verify_subnet "$RG" "$VNET_A1" "$sn"; done + +# A2 +az network vnet create -g "$RG" -n "$VNET_A2" --address-prefix 10.11.0.0/16 --subnet-name s1 --subnet-prefix "$A2_MAIN" -l "$LOCATION" --output none \ + && echo "Created $VNET_A2 with subnet s1" +verify_vnet "$RG" "$VNET_A2" +verify_subnet "$RG" "$VNET_A2" "s1" + +# A3 +az network vnet create -g "$RG" -n "$VNET_A3" --address-prefix 10.12.0.0/16 --subnet-name s1 --subnet-prefix "$A3_MAIN" -l "$LOCATION" --output none \ + && echo "Created $VNET_A3 with subnet s1" +verify_vnet "$RG" "$VNET_A3" +verify_subnet "$RG" "$VNET_A3" "s1" + +# B1 +az network vnet create -g "$RG" -n "$VNET_B1" --address-prefix 10.20.0.0/16 --subnet-name s1 --subnet-prefix "$B1_MAIN" -l "$LOCATION" --output none \ + && echo "Created $VNET_B1 with subnet s1" +verify_vnet "$RG" "$VNET_B1" +verify_subnet "$RG" "$VNET_B1" "s1" + +echo " All VNets and subnets created and verified successfully." 
diff --git a/.pipelines/swiftv2-long-running/template/long-running-pipeline-template.yaml b/.pipelines/swiftv2-long-running/template/long-running-pipeline-template.yaml new file mode 100644 index 0000000000..cc6016f17a --- /dev/null +++ b/.pipelines/swiftv2-long-running/template/long-running-pipeline-template.yaml @@ -0,0 +1,140 @@ +parameters: + - name: subscriptionId + type: string + - name: location + type: string + - name: resourceGroupName + type: string + - name: vmSkuDefault + type: string + - name: vmSkuHighNIC + type: string + - name: serviceConnection + type: string + +stages: + - stage: AKSClusterAndNetworking + displayName: "Stage: AKS Cluster and Networking Setup" + jobs: + # ------------------------------------------------------------ + # Job 1: Create Resource Group + # ------------------------------------------------------------ + - job: CreateResourceGroup + displayName: "Create Resource Group" + pool: + vmImage: ubuntu-latest + steps: + - checkout: self + - task: AzureCLI@2 + displayName: "Create resource group" + inputs: + azureSubscription: ${{ parameters.serviceConnection }} + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + echo "==> Creating resource group ${{ parameters.resourceGroupName }} in ${{ parameters.location }}" + az group create \ + --name "${{ parameters.resourceGroupName }}" \ + --location "${{ parameters.location }}" \ + --subscription "${{ parameters.subscriptionId }}" + echo "Resource group created successfully." 
+ + # ------------------------------------------------------------ + # Job 2: Create AKS Clusters + # ------------------------------------------------------------ + - job: CreateCluster + displayName: "Create AKS Clusters" + dependsOn: CreateResourceGroup + pool: + vmImage: ubuntu-latest + steps: + - checkout: self + - task: AzureCLI@2 + displayName: "Run create_aks.sh" + inputs: + azureSubscription: ${{ parameters.serviceConnection }} + scriptType: bash + scriptLocation: scriptPath + scriptPath: ".pipelines/swiftv2-long-running/scripts/create_aks.sh" + arguments: > + ${{ parameters.subscriptionId }} + ${{ parameters.location }} + ${{ parameters.resourceGroupName }} + ${{ parameters.vmSkuDefault }} + ${{ parameters.vmSkuHighNIC }} + + # ------------------------------------------------------------ + # Job 3: Networking & Storage + # ------------------------------------------------------------ + - job: NetworkingAndStorage + displayName: "Networking and Storage Setup" + dependsOn: CreateResourceGroup + pool: + vmImage: ubuntu-latest + steps: + - checkout: self + + # Task 1: Create VNets + - task: AzureCLI@2 + displayName: "Create customer vnets" + inputs: + azureSubscription: ${{ parameters.serviceConnection }} + scriptType: bash + scriptLocation: scriptPath + scriptPath: ".pipelines/swiftv2-long-running/scripts/create_vnets.sh" + arguments: > + ${{ parameters.subscriptionId }} + ${{ parameters.location }} + ${{ parameters.resourceGroupName }} + + # Task 2: Create Peerings + - task: AzureCLI@2 + displayName: "Create customer vnet peerings" + inputs: + azureSubscription: ${{ parameters.serviceConnection }} + scriptType: bash + scriptLocation: scriptPath + scriptPath: ".pipelines/swiftv2-long-running/scripts/create_peerings.sh" + arguments: > + ${{ parameters.resourceGroupName }} + + # Task 3: Create Storage Accounts + - task: AzureCLI@2 + name: CreateStorageAccounts + displayName: "Create storage accounts" + inputs: + azureSubscription: ${{ 
parameters.serviceConnection }} + scriptType: bash + scriptLocation: scriptPath + scriptPath: ".pipelines/swiftv2-long-running/scripts/create_storage.sh" + arguments: > + ${{ parameters.subscriptionId }} + ${{ parameters.location }} + ${{ parameters.resourceGroupName }} + + # Task 4: Create NSG + - task: AzureCLI@2 + displayName: "Create network security groups to restrict access between subnets" + inputs: + azureSubscription: ${{ parameters.serviceConnection }} + scriptType: bash + scriptLocation: scriptPath + scriptPath: ".pipelines/swiftv2-long-running/scripts/create_nsg.sh" + arguments: > + ${{ parameters.subscriptionId }} + ${{ parameters.resourceGroupName }} + ${{ parameters.location }} + + # Task 5: Create Private Endpoint + - task: AzureCLI@2 + displayName: "Create Private Endpoint for Storage Account" + inputs: + azureSubscription: ${{ parameters.serviceConnection }} + scriptType: bash + scriptLocation: scriptPath + scriptPath: ".pipelines/swiftv2-long-running/scripts/create_pe.sh" + arguments: > + ${{ parameters.subscriptionId }} + ${{ parameters.location }} + ${{ parameters.resourceGroupName }} + $(CreateStorageAccounts.StorageAccount1) diff --git a/hack/aks/Makefile b/hack/aks/Makefile index a5011611f9..5e1c8f3f9b 100644 --- a/hack/aks/Makefile +++ b/hack/aks/Makefile @@ -285,6 +285,20 @@ swiftv2-dummy-cluster-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT AzCNI clus --yes @$(MAKE) set-kubeconf +swiftv2-podsubnet-cluster-up: ipv4 swift-net-up ## Bring up a SWIFTv2 PodSubnet cluster + $(COMMON_AKS_FIELDS) + --network-plugin azure \ + --nodepool-name nodepool1 \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ + --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ + --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \ + --service-cidr "10.0.0.0/16" \ + --dns-service-ip "10.0.0.10" \ + --tags 
fastpathenabled=true RGOwner=LongRunningTestPipelines stampcreatorserviceinfo=true \ + --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/NetworkingMultiTenancyPreview \ + --yes + @$(MAKE) set-kubeconf + # The below Vnet Scale clusters are currently only in private preview and available with Kubernetes 1.28 # These AKS clusters can only be created in a limited subscription listed here: # https://dev.azure.com/msazure/CloudNativeCompute/_git/aks-rp?path=/resourceprovider/server/microsoft.com/containerservice/flags/network_flags.go&version=GBmaster&line=134&lineEnd=135&lineStartColumn=1&lineEndColumn=1&lineStyle=plain&_a=contents @@ -424,6 +438,18 @@ windows-swift-nodepool-up: ## Add windows node pool --subscription $(SUB) \ --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet +linux-swiftv2-nodepool-up: ## Add linux node pool to swiftv2 cluster + $(AZCLI) aks nodepool add -g $(GROUP) -n nplinux \ + --node-count $(NODE_COUNT) \ + --node-vm-size $(VM_SIZE) \ + --cluster-name $(CLUSTER) \ + --os-type Linux \ + --max-pods 250 \ + --subscription $(SUB) \ + --tags fastpathenabled=true,aks-nic-enable-multi-tenancy=true \ + --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/NetworkingMultiTenancyPreview \ + --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet + down: ## Delete the cluster $(AZCLI) aks delete -g $(GROUP) -n $(CLUSTER) --yes @$(MAKE) unset-kubeconf From a2dbdb3eab9a7b770f35d1da219d70d3afb98171 Mon Sep 17 00:00:00 2001 From: Alexander <39818795+QxBytes@users.noreply.github.com> Date: Wed, 5 Nov 2025 11:31:21 -0800 Subject: [PATCH 17/47] ci: clean up disk space before build and skip windows port forward (#4109) * clean up disk space before build * skip portforward e2e in windows --- .pipelines/containers/container-template.yaml | 29 ++++++++++- .pipelines/pipeline.yaml | 49 
+++++++++++++++---- .../aks-swift/e2e-job-template.yaml | 2 +- .../singletenancy/aks-swift/e2e.stages.yaml | 2 +- .../singletenancy/aks/e2e-job-template.yaml | 2 +- .pipelines/singletenancy/aks/e2e.stages.yaml | 2 +- ...ni-overlay-stateless-e2e-job-template.yaml | 2 +- ...zure-cni-overlay-stateless-e2e.stages.yaml | 2 +- .../azure-cni-overlay-e2e-job-template.yaml | 2 +- .../azure-cni-overlay-e2e.stages.yaml | 2 +- 10 files changed, 76 insertions(+), 18 deletions(-) diff --git a/.pipelines/containers/container-template.yaml b/.pipelines/containers/container-template.yaml index aaf54b8e8e..b4daab2083 100644 --- a/.pipelines/containers/container-template.yaml +++ b/.pipelines/containers/container-template.yaml @@ -15,11 +15,38 @@ steps: - script: | set -e + echo "Disk space before cleanup..." + df -h / + echo "Removing unnecessary files to free up disk space..." + sudo rm -rf \ + /opt/hostedtoolcache \ + /opt/google/chrome \ + /opt/microsoft/msedge \ + /opt/microsoft/powershell \ + /opt/pipx \ + /usr/lib/mono \ + /usr/local/julia* \ + /usr/local/lib/android \ + /usr/local/lib/node_modules \ + /usr/local/share/chromium \ + /usr/local/share/powershell \ + /usr/share/dotnet \ + /usr/share/swift + echo "Disk space after cleanup..." 
+ df -h / + displayName: "Clean up disk space" + +- script: | + set -e + echo "=== Disk space BEFORE make image ===" + df -h if [ ${{ parameters.os }} = 'windows' ]; then export BUILDX_ACTION='--push'; fi make ${{ parameters.name }}-image OS=${{ parameters.os }} ARCH=${{ parameters.arch }} + echo "=== Disk space AFTER make image ===" + df -h name: image_build displayName: Image Build - retryCountOnTaskFailure: 3 + retryCountOnTaskFailure: 2 - task: AzureCLI@2 displayName: "Logout" diff --git a/.pipelines/pipeline.yaml b/.pipelines/pipeline.yaml index 4bf015e1f0..a249d127ca 100644 --- a/.pipelines/pipeline.yaml +++ b/.pipelines/pipeline.yaml @@ -148,14 +148,6 @@ stages: arch: amd64 name: ipv6-hp-bpf os: linux - npm_linux_amd64: - arch: amd64 - name: npm - os: linux - npm_windows_amd64: - arch: amd64 - name: npm - os: windows steps: - template: containers/container-template.yaml parameters: @@ -193,6 +185,45 @@ stages: arch: arm64 name: ipv6-hp-bpf os: linux + steps: + - template: containers/container-template.yaml + parameters: + arch: $(arch) + name: $(name) + os: $(os) + + - stage: containerize_npm + displayName: Build NPM Images + dependsOn: + - setup + jobs: + - job: containerize_npm_amd64 + displayName: Build NPM Images + pool: + name: "$(BUILD_POOL_NAME_LINUX_AMD64)" + strategy: + matrix: + npm_linux_amd64: + arch: amd64 + name: npm + os: linux + npm_windows_amd64: + arch: amd64 + name: npm + os: windows + steps: + - template: containers/container-template.yaml + parameters: + arch: $(arch) + name: $(name) + os: $(os) + os_version: $(os_version) + - job: containerize_npm_linux_arm64 + displayName: Build NPM Images + pool: + name: "$(BUILD_POOL_NAME_LINUX_ARM64)" + strategy: + matrix: npm_linux_arm64: arch: arm64 name: npm @@ -244,7 +275,7 @@ stages: - stage: publish_npm displayName: Publish NPM Multiarch Manifest dependsOn: - - containerize + - containerize_npm jobs: - job: npm_manifest displayName: Compile NPM Manifest diff --git 
a/.pipelines/singletenancy/aks-swift/e2e-job-template.yaml b/.pipelines/singletenancy/aks-swift/e2e-job-template.yaml index 64b612da45..d3b754b1bc 100644 --- a/.pipelines/singletenancy/aks-swift/e2e-job-template.yaml +++ b/.pipelines/singletenancy/aks-swift/e2e-job-template.yaml @@ -71,7 +71,7 @@ stages: dependsOn: ${{ parameters.name }} datapath: true dns: true - portforward: true + portforward: ${{ eq(parameters.os, 'linux') }} # Unblock Pipeline, broken for all windows scenarios hostport: true service: true diff --git a/.pipelines/singletenancy/aks-swift/e2e.stages.yaml b/.pipelines/singletenancy/aks-swift/e2e.stages.yaml index 714a559771..ed9c149342 100644 --- a/.pipelines/singletenancy/aks-swift/e2e.stages.yaml +++ b/.pipelines/singletenancy/aks-swift/e2e.stages.yaml @@ -76,7 +76,7 @@ stages: dependsOn: ${{ parameters.name }} datapath: true dns: true - portforward: true + portforward: ${{ eq(parameters.os, 'linux') }} # Unblock Pipeline, broken for all windows scenarios hostport: true service: true diff --git a/.pipelines/singletenancy/aks/e2e-job-template.yaml b/.pipelines/singletenancy/aks/e2e-job-template.yaml index 5a8c3c28b5..d7de01afcb 100644 --- a/.pipelines/singletenancy/aks/e2e-job-template.yaml +++ b/.pipelines/singletenancy/aks/e2e-job-template.yaml @@ -74,7 +74,7 @@ stages: os: ${{ parameters.os }} datapath: true dns: true - portforward: true + portforward: ${{ eq(parameters.os, 'linux') }} # Unblock Pipeline, broken for all windows scenarios hybridWin: true service: true hostport: true diff --git a/.pipelines/singletenancy/aks/e2e.stages.yaml b/.pipelines/singletenancy/aks/e2e.stages.yaml index 885e1d1043..7f5d4e2a59 100644 --- a/.pipelines/singletenancy/aks/e2e.stages.yaml +++ b/.pipelines/singletenancy/aks/e2e.stages.yaml @@ -80,7 +80,7 @@ stages: os: ${{ parameters.os }} datapath: true dns: true - portforward: true + portforward: ${{ eq(parameters.os, 'linux') }} # Unblock Pipeline, broken for all windows scenarios hybridWin: true service: true 
hostport: true diff --git a/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e-job-template.yaml b/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e-job-template.yaml index 3c955293d3..ca44ef3129 100644 --- a/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e-job-template.yaml +++ b/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e-job-template.yaml @@ -72,7 +72,7 @@ stages: dependsOn: ${{ parameters.name }}_windows datapath: true dns: true - portforward: true + portforward: false # Unblock Pipeline, as stateless is tested in windows, broken for all windows scenarios hostport: true service: true hybridWin: true diff --git a/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e.stages.yaml b/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e.stages.yaml index 2a2c69f1fb..9c9eecda31 100644 --- a/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e.stages.yaml +++ b/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e.stages.yaml @@ -78,7 +78,7 @@ stages: dependsOn: ${{ parameters.name }}_windows datapath: true dns: true - portforward: true + portforward: false # Unblock Pipeline, as stateless is tested in windows, broken for all windows scenarios hostport: true service: true hybridWin: true diff --git a/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-job-template.yaml b/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-job-template.yaml index 62b1d0a6fa..5208cb98c8 100644 --- a/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-job-template.yaml +++ b/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-job-template.yaml @@ -71,7 +71,7 @@ stages: dependsOn: ${{ parameters.name }}_${{ parameters.os }} datapath: true dns: true - portforward: true + 
portforward: ${{ eq(parameters.os, 'linux') }} # Unblock Pipeline, broken for all windows scenarios hostport: true service: true hybridWin: ${{ eq(parameters.os, 'windows') }} diff --git a/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.stages.yaml b/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.stages.yaml index bbce0bac1f..ed4a46bc84 100644 --- a/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.stages.yaml +++ b/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.stages.yaml @@ -77,7 +77,7 @@ stages: dependsOn: ${{ parameters.name }}_${{ parameters.os }} datapath: true dns: true - portforward: true + portforward: ${{ eq(parameters.os, 'linux') }} # Unblock Pipeline, broken for all windows scenarios hostport: true service: true hybridWin: ${{ eq(parameters.os, 'windows') }} From a683f4d3c4de5d21e4a532a16b6c5c2b5c9ccd3b Mon Sep 17 00:00:00 2001 From: Alexander <39818795+QxBytes@users.noreply.github.com> Date: Wed, 5 Nov 2025 11:32:40 -0800 Subject: [PATCH 18/47] ci: remove debug stack usage (#4075) remove debug stack usage --- cni/network/invoker_azure.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/cni/network/invoker_azure.go b/cni/network/invoker_azure.go index 6cd3940eff..a344a672a9 100644 --- a/cni/network/invoker_azure.go +++ b/cni/network/invoker_azure.go @@ -1,10 +1,10 @@ package network import ( + "errors" "fmt" "net" "os" - "runtime/debug" "strings" "github.com/Azure/azure-container-networking/cni" @@ -20,7 +20,11 @@ import ( "go.uber.org/zap" ) -var logger = log.CNILogger.With(zap.String("component", "cni-net")) +var ( + logger = log.CNILogger.With(zap.String("component", "cni-net")) + errNilNetworkConfig = errors.New("network config is nil") + errInvalidIPAddress = errors.New("invalid ip address") +) const ( bytesSize4 = 4 @@ -50,7 +54,7 @@ func (invoker *AzureIPAMInvoker) Add(addConfig IPAMAddConfig) (IPAMAddResult, er addResult := 
IPAMAddResult{interfaceInfo: make(map[string]network.InterfaceInfo)} if addConfig.nwCfg == nil { - return addResult, invoker.plugin.Errorf("nil nwCfg passed to CNI ADD, stack: %+v", string(debug.Stack())) + return addResult, invoker.plugin.Errorf("nil nwCfg passed to CNI ADD: %v", errNilNetworkConfig) } if len(invoker.nwInfo.Subnets) > 0 { @@ -161,7 +165,7 @@ func (invoker *AzureIPAMInvoker) deleteIpamState() { func (invoker *AzureIPAMInvoker) Delete(address *net.IPNet, nwCfg *cni.NetworkConfig, _ *cniSkel.CmdArgs, options map[string]interface{}) error { //nolint if nwCfg == nil { - return invoker.plugin.Errorf("nil nwCfg passed to CNI ADD, stack: %+v", string(debug.Stack())) + return invoker.plugin.Errorf("nil nwCfg passed to CNI DEL: %v", errNilNetworkConfig) } if len(invoker.nwInfo.Subnets) > 0 { @@ -203,7 +207,7 @@ func (invoker *AzureIPAMInvoker) Delete(address *net.IPNet, nwCfg *cni.NetworkCo return invoker.plugin.Errorf("Failed to release ipv6 address: %v", err) } } else { - return invoker.plugin.Errorf("Address is incorrect, not valid IPv4 or IPv6, stack: %+v", string(debug.Stack())) + return invoker.plugin.Errorf("address is incorrect, not valid IPv4 or IPv6: %v", errInvalidIPAddress) } return nil From 8581bbe4ca73d30fd0809f3ac7f0e624475de583 Mon Sep 17 00:00:00 2001 From: ZetaoZhuang <106119232+ZetaoZhuang@users.noreply.github.com> Date: Wed, 5 Nov 2025 14:43:31 -0800 Subject: [PATCH 19/47] filtering mTLS connections based on the subject name from Caller (#4081) * filtering TLS connections based on the subject name from Caller * add validation to client rawCerts * fix lint * update config name and error msgs * update variable name in tlsconfig * renaming var in tlssetting * custom SN verification in verifyPeerCertificate * add minor validation * update err msg * address comment * address comment --- cns/configuration/cns_config.json | 3 +- cns/configuration/configuration.go | 1 + cns/configuration/configuration_test.go | 9 +- cns/service.go | 55 +++++++- 
cns/service/main.go | 1 + cns/service_test.go | 160 +++++++++++++++++------- server/tls/tlscertificate_retriever.go | 1 + 7 files changed, 183 insertions(+), 47 deletions(-) diff --git a/cns/configuration/cns_config.json b/cns/configuration/cns_config.json index 81ef6c9b05..967089d142 100644 --- a/cns/configuration/cns_config.json +++ b/cns/configuration/cns_config.json @@ -35,5 +35,6 @@ "AZRSettings": { "PopulateHomeAzCacheRetryIntervalSecs": 60 }, - "MinTLSVersion": "TLS 1.2" + "MinTLSVersion": "TLS 1.2", + "MtlsClientCertSubjectName": "" } diff --git a/cns/configuration/configuration.go b/cns/configuration/configuration.go index 9ec5f8664f..b5fc0e4114 100644 --- a/cns/configuration/configuration.go +++ b/cns/configuration/configuration.go @@ -59,6 +59,7 @@ type CNSConfig struct { WireserverIP string GRPCSettings GRPCSettings MinTLSVersion string + MtlsClientCertSubjectName string } type TelemetrySettings struct { diff --git a/cns/configuration/configuration_test.go b/cns/configuration/configuration_test.go index 186c92c376..ab3d93ebd1 100644 --- a/cns/configuration/configuration_test.go +++ b/cns/configuration/configuration_test.go @@ -222,7 +222,8 @@ func TestSetCNSConfigDefaults(t *testing.T) { IPAddress: "localhost", Port: 8080, }, - MinTLSVersion: "TLS 1.2", + MinTLSVersion: "TLS 1.2", + MtlsClientCertSubjectName: "", }, }, { @@ -253,7 +254,8 @@ func TestSetCNSConfigDefaults(t *testing.T) { IPAddress: "192.168.1.1", Port: 9090, }, - MinTLSVersion: "TLS 1.3", + MinTLSVersion: "TLS 1.3", + MtlsClientCertSubjectName: "example.com", }, want: CNSConfig{ ChannelMode: "Other", @@ -283,7 +285,8 @@ func TestSetCNSConfigDefaults(t *testing.T) { IPAddress: "192.168.1.1", Port: 9090, }, - MinTLSVersion: "TLS 1.3", + MinTLSVersion: "TLS 1.3", + MtlsClientCertSubjectName: "example.com", }, }, } diff --git a/cns/service.go b/cns/service.go index ab7a0be3c3..5ee249c9fc 100644 --- a/cns/service.go +++ b/cns/service.go @@ -156,6 +156,54 @@ func getTLSConfig(tlsSettings 
localtls.TlsSettings, errChan chan<- error) (*tls. return nil, errors.Errorf("invalid tls settings: %+v", tlsSettings) } +// verifyPeerCertificate verifies the client certificate's subject name matches the expected subject name. +func verifyPeerCertificate(verifiedChains [][]*x509.Certificate, clientSubjectName string) error { + // no client subject name provided, skip verification + if clientSubjectName == "" { + return nil + } + + if len(verifiedChains) == 0 || len(verifiedChains[0]) == 0 { + return errors.New("no client certificate provided during mTLS") + } + + // Get client leaf certificate + clientCert := verifiedChains[0][0] + // Match DNS names (case-insensitive) + dnsNames := clientCert.DNSNames + for _, dns := range dnsNames { + if strings.EqualFold(dns, clientSubjectName) { + return nil + } + } + + // If SANs didn't match, fall back to Common Name (CN) match. + clientCN := clientCert.Subject.CommonName + if clientCN != "" && strings.EqualFold(clientCN, clientSubjectName) { + return nil + } + + // maskHalf of the DNS names + maskedDNS := make([]string, len(dnsNames)) + for i, dns := range dnsNames { + maskedDNS[i] = maskHalf(dns) + } + + return errors.Errorf("Failed to verify client certificate subject name during mTLS, clientSubjectName: %s, client cert SANs: %+v, clientCN: %s", + clientSubjectName, maskedDNS, maskHalf(clientCN)) +} + +// maskHalf masks half of the input string with asterisks. 
+func maskHalf(s string) string { + n := len(s) + if n == 0 { + return s + } + + half := n / 2 + return s[:half] + strings.Repeat("*", n-half) +} + func getTLSConfigFromFile(tlsSettings localtls.TlsSettings) (*tls.Config, error) { tlsCertRetriever, err := localtls.GetTlsCertificateRetriever(tlsSettings) if err != nil { @@ -202,8 +250,10 @@ func getTLSConfigFromFile(tlsSettings localtls.TlsSettings) (*tls.Config, error) tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert tlsConfig.ClientCAs = rootCAs tlsConfig.RootCAs = rootCAs + tlsConfig.VerifyPeerCertificate = func(_ [][]byte, verifiedChains [][]*x509.Certificate) error { + return verifyPeerCertificate(verifiedChains, tlsSettings.MtlsClientCertSubjectName) + } } - logger.Debugf("TLS configured successfully from file: %+v", tlsSettings) return tlsConfig, nil @@ -254,6 +304,9 @@ func getTLSConfigFromKeyVault(tlsSettings localtls.TlsSettings, errChan chan<- e tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert tlsConfig.ClientCAs = rootCAs tlsConfig.RootCAs = rootCAs + tlsConfig.VerifyPeerCertificate = func(_ [][]byte, verifiedChains [][]*x509.Certificate) error { + return verifyPeerCertificate(verifiedChains, tlsSettings.MtlsClientCertSubjectName) + } } logger.Debugf("TLS configured successfully from KV: %+v", tlsSettings) diff --git a/cns/service/main.go b/cns/service/main.go index 67f7872f44..d7b9a526d5 100644 --- a/cns/service/main.go +++ b/cns/service/main.go @@ -810,6 +810,7 @@ func main() { KeyVaultCertificateRefreshInterval: time.Duration(cnsconfig.KeyVaultSettings.RefreshIntervalInHrs) * time.Hour, UseMTLS: cnsconfig.UseMTLS, MinTLSVersion: cnsconfig.MinTLSVersion, + MtlsClientCertSubjectName: cnsconfig.MtlsClientCertSubjectName, } } diff --git a/cns/service_test.go b/cns/service_test.go index d20c2ef11a..fd0b7a44b4 100644 --- a/cns/service_test.go +++ b/cns/service_test.go @@ -133,57 +133,108 @@ func TestNewService(t *testing.T) { t.Run("NewServiceWithMutualTLS", func(t *testing.T) { testCertFilePath 
:= createTestCertificate(t) - config.TLSSettings = serverTLS.TlsSettings{ - TLSPort: "10091", - TLSSubjectName: "localhost", - TLSCertificatePath: testCertFilePath, - UseMTLS: true, - MinTLSVersion: "TLS 1.2", + cases := []struct { + name string + tlsSettings serverTLS.TlsSettings + handshakeFailureExpected bool + }{ + { + name: "matching client SANs", + tlsSettings: serverTLS.TlsSettings{ + TLSPort: "10091", + TLSSubjectName: "localhost", + TLSCertificatePath: testCertFilePath, + UseMTLS: true, + MinTLSVersion: "TLS 1.2", + MtlsClientCertSubjectName: "example.com", + }, + handshakeFailureExpected: false, + }, + { + name: "matching client cert CN", + tlsSettings: serverTLS.TlsSettings{ + TLSPort: "10093", + TLSSubjectName: "localhost", + TLSCertificatePath: testCertFilePath, + UseMTLS: true, + MinTLSVersion: "TLS 1.2", + MtlsClientCertSubjectName: "foo.com", // Common Name from test certificate + }, + handshakeFailureExpected: false, + }, + { + name: "failing to match client SANs and CN", + tlsSettings: serverTLS.TlsSettings{ + TLSPort: "10092", + TLSSubjectName: "localhost", + TLSCertificatePath: testCertFilePath, + UseMTLS: true, + MinTLSVersion: "TLS 1.2", + MtlsClientCertSubjectName: "random.com", + }, + handshakeFailureExpected: true, + }, } - svc, err := NewService(config.Name, config.Version, config.ChannelMode, config.Store) - require.NoError(t, err) - require.IsType(t, &Service{}, svc) + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + config.TLSSettings = tc.tlsSettings - svc.SetOption(acn.OptCnsURL, "") - svc.SetOption(acn.OptCnsPort, "") + svc, err := NewService(config.Name, config.Version, config.ChannelMode, config.Store) + require.NoError(t, err) + require.IsType(t, &Service{}, svc) - err = svc.Initialize(config) - t.Cleanup(func() { - svc.Uninitialize() - }) - require.NoError(t, err) + svc.SetOption(acn.OptCnsURL, "") + svc.SetOption(acn.OptCnsPort, "") - err = svc.StartListener(config) - require.NoError(t, err) + err = 
svc.Initialize(config) + require.NoError(t, err) - mTLSConfig, err := getTLSConfigFromFile(config.TLSSettings) - require.NoError(t, err) + err = svc.StartListener(config) + require.NoError(t, err) - client := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: mTLSConfig, - }, - } + mTLSConfig, err := getTLSConfigFromFile(config.TLSSettings) + require.NoError(t, err) - // TLS listener - req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, "https://localhost:10091", http.NoBody) - require.NoError(t, err) - resp, err := client.Do(req) - t.Cleanup(func() { - resp.Body.Close() - }) - require.NoError(t, err) + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: mTLSConfig, + }, + } - // HTTP listener - httpClient := &http.Client{} - req, err = http.NewRequestWithContext(context.TODO(), http.MethodGet, "http://localhost:10090", http.NoBody) - require.NoError(t, err) - resp, err = httpClient.Do(req) - t.Cleanup(func() { - resp.Body.Close() - }) - require.NoError(t, err) + tlsURL := "https://localhost:" + tc.tlsSettings.TLSPort + // TLS listener + req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, tlsURL, http.NoBody) + require.NoError(t, err) + resp, err := client.Do(req) + if tc.handshakeFailureExpected { + require.Error(t, err) + require.ErrorContains(t, err, "Failed to verify client certificate subject name during mTLS") + } else { + require.NoError(t, err) + t.Cleanup(func() { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + }) + } + + // HTTP listener + httpClient := &http.Client{} + req, err = http.NewRequestWithContext(context.TODO(), http.MethodGet, "http://localhost:10090", http.NoBody) + require.NoError(t, err) + resp, err = httpClient.Do(req) + require.NoError(t, err) + t.Cleanup(func() { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + }) + + // Cleanup + svc.Uninitialize() + }) + } }) } @@ -355,3 +406,28 @@ func TestTLSVersionNumber(t *testing.T) { 
require.NoError(t, err) }) } + +func TestMaskHalf(t *testing.T) { + tests := []struct { + name string + in string + want string + }{ + {"empty", "", ""}, + {"one char string", "e", "*"}, + {"two chars string", "ex", "e*"}, + {"three chars string", "exa", "e**"}, + {"four chars string", "exam", "ex**"}, + {"five chars string", "examp", "ex***"}, + {"long string", "example.com", "examp******"}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := maskHalf(tc.in) + if got != tc.want { + t.Fatalf("maskHalf(%s) = %s, want %s", tc.in, got, tc.want) + } + }) + } +} diff --git a/server/tls/tlscertificate_retriever.go b/server/tls/tlscertificate_retriever.go index a22a7336b7..b6a0d11099 100644 --- a/server/tls/tlscertificate_retriever.go +++ b/server/tls/tlscertificate_retriever.go @@ -15,6 +15,7 @@ type TlsSettings struct { KeyVaultCertificateRefreshInterval time.Duration UseMTLS bool MinTLSVersion string + MtlsClientCertSubjectName string } func GetTlsCertificateRetriever(settings TlsSettings) (TlsCertificateRetriever, error) { From 6bd4fde38cec60b234d5001367b42bb9c527ae1d Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Thu, 6 Nov 2025 08:45:59 -0500 Subject: [PATCH 20/47] Return nil when HNS endpoint ID is empty during deletion (#4105) * Initial plan * Fix: Return nil instead of error when HNS id is not found during endpoint deletion Co-authored-by: behzad-mir <13154712+behzad-mir@users.noreply.github.com> * Complete fix for HNS endpoint deletion issue Co-authored-by: behzad-mir <13154712+behzad-mir@users.noreply.github.com> * Fix: fix hns return in case of emty HNS ID --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: behzad-mir <13154712+behzad-mir@users.noreply.github.com> Co-authored-by: Behzad Mirkhanzadeh --- network/endpoint_windows.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/network/endpoint_windows.go 
b/network/endpoint_windows.go index edd52327f2..52ef0eff5d 100644 --- a/network/endpoint_windows.go +++ b/network/endpoint_windows.go @@ -530,8 +530,8 @@ func (nw *network) deleteEndpointImpl(_ netlink.NetlinkInterface, _ platform.Exe } if ep.HnsId == "" { - logger.Error("No HNS id found. Skip endpoint deletion", zap.Any("nicType", ep.NICType), zap.String("containerId", ep.ContainerID)) - return fmt.Errorf("No HNS id found. Skip endpoint deletion for nicType %v, containerID %s", ep.NICType, ep.ContainerID) //nolint + logger.Info("No HNS id found. Skip endpoint deletion", zap.Any("nicType", ep.NICType), zap.String("containerId", ep.ContainerID)) + return nil } if useHnsV2, err := UseHnsV2(ep.NetNs); useHnsV2 { From 3a7c759af8a03c143d1a26246e0ee15a8a2d0906 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Nov 2025 13:47:49 +0000 Subject: [PATCH 21/47] ci: bump actions/upload-artifact from 4 to 5 (#4102) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4 to 5. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-version: '5' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cyclonus-netpol-extended-nightly-test.yaml | 2 +- .github/workflows/cyclonus-netpol-test.yaml | 2 +- .github/workflows/golangci.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/cyclonus-netpol-extended-nightly-test.yaml b/.github/workflows/cyclonus-netpol-extended-nightly-test.yaml index 899481ce29..4ee521a40c 100644 --- a/.github/workflows/cyclonus-netpol-extended-nightly-test.yaml +++ b/.github/workflows/cyclonus-netpol-extended-nightly-test.yaml @@ -71,7 +71,7 @@ jobs: mv ./test/cyclonus/cyclonus-test.txt ./cyclonus-test_${{ matrix.profile }}.txt - name: "Upload Logs" - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 if: always() with: name: logs-${{ matrix.profile }} diff --git a/.github/workflows/cyclonus-netpol-test.yaml b/.github/workflows/cyclonus-netpol-test.yaml index e7f4f28f6d..bbe349be4b 100644 --- a/.github/workflows/cyclonus-netpol-test.yaml +++ b/.github/workflows/cyclonus-netpol-test.yaml @@ -78,7 +78,7 @@ jobs: mv ./test/cyclonus/cyclonus-test.txt ./cyclonus-test_${{ matrix.profile }}.txt - name: 'Upload Logs' - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 if: always() with: name: logs-${{ matrix.profile }} diff --git a/.github/workflows/golangci.yaml b/.github/workflows/golangci.yaml index c2e9b7d3fc..9846c11498 100644 --- a/.github/workflows/golangci.yaml +++ b/.github/workflows/golangci.yaml @@ -26,7 +26,7 @@ jobs: run: make bpf-lib && go generate ./... 
- name: Upload generated code - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: generated-bpf-program-code path: ./bpf-prog/azure-block-iptables/pkg/blockservice From cf87eab93663326c653d88cf6d7660fef7e5583f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Nov 2025 11:46:16 -0500 Subject: [PATCH 22/47] ci: bump actions/download-artifact from 4 to 6 (#4101) Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 4 to 6. - [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/v4...v6) --- updated-dependencies: - dependency-name: actions/download-artifact dependency-version: '6' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/golangci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/golangci.yaml b/.github/workflows/golangci.yaml index 9846c11498..59661b61c9 100644 --- a/.github/workflows/golangci.yaml +++ b/.github/workflows/golangci.yaml @@ -46,7 +46,7 @@ jobs: with: go-version-file: go.mod - name: Download generated code - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: generated-bpf-program-code path: ./bpf-prog/azure-block-iptables/pkg/blockservice From 3e6d8a8497454418e4a02f9d2b146ef87964808e Mon Sep 17 00:00:00 2001 From: tamilmani1989 Date: Tue, 11 Nov 2025 10:39:00 -0800 Subject: [PATCH 23/47] feature: Adding apipa nic support for swiftv2 windows (#4012) * refactor: Update apipa nic as separate entry in podIPInfo This PR updates both CNS and CNI code to construct apipa nic as separate entry in podIpInfo if either of allowhostonc or allownctohost set. 
This allows CNI to treat this as separate endpoint and align with current cni design/model of 1 nic per endpoint info. CNI then iterates through endpoint info and creates one nic at a time. Signed-off-by: Tamilmani * Introduced NetworkContainerID, AllowHostToNC, AllowNCToHost fields in podipinfo to support apipa endpoint create request. persist networkcontainer id in cns state to support delete apipa endpoint when called as endpoint name based on networkcontainerid. Signed-off-by: Tamilmani * dummy create network for apipa nic as like backend nic Signed-off-by: Tamilmani * Added DeleteEndpointState handler for removing endpoint state from CNS in stateless cni case Signed-off-by: Tamilmani * add config for disabling async delete Signed-off-by: Tamilmani * lint fixes and address comment Signed-off-by: Tamilmani * Address review comments Signed-off-by: Tamilmani --------- Signed-off-by: Tamilmani --- cni/netconfig.go | 1 + cni/network/invoker_cns.go | 33 ++- cni/network/invoker_cns_test.go | 312 ++++++++++++++++++++++++++++ cni/network/network.go | 17 +- cni/network/network_windows.go | 5 +- cni/network/network_windows_test.go | 39 ++++ cns/Dockerfile | 2 +- cns/NetworkContainerContract.go | 9 + cns/client/client.go | 34 +++ cns/restserver/ipam.go | 91 +++++++- cns/restserver/ipam_test.go | 11 +- cns/restserver/restserver.go | 15 +- network/endpoint.go | 8 +- network/endpoint_windows.go | 44 +++- network/endpoint_windows_test.go | 6 +- network/manager.go | 56 +++-- network/network.go | 3 +- network/network_windows.go | 3 +- 18 files changed, 632 insertions(+), 57 deletions(-) diff --git a/cni/netconfig.go b/cni/netconfig.go index c7e0c0ca7e..f69a5bcbe2 100644 --- a/cni/netconfig.go +++ b/cni/netconfig.go @@ -72,6 +72,7 @@ type NetworkConfig struct { EnableExactMatchForPodName bool `json:"enableExactMatchForPodName,omitempty"` DisableHairpinOnHostInterface bool `json:"disableHairpinOnHostInterface,omitempty"` DisableIPTableLock bool 
`json:"disableIPTableLock,omitempty"` + DisableAsyncDelete bool `json:"disableAsyncDelete,omitempty"` CNSUrl string `json:"cnsurl,omitempty"` ExecutionMode string `json:"executionMode,omitempty"` IPAM IPAM `json:"ipam,omitempty"` diff --git a/cni/network/invoker_cns.go b/cni/network/invoker_cns.go index 2754b2d886..8511451986 100644 --- a/cni/network/invoker_cns.go +++ b/cni/network/invoker_cns.go @@ -169,7 +169,7 @@ func (invoker *CNSIPAMInvoker) Add(addConfig IPAMAddConfig) (IPAMAddResult, erro } logger.Info("Received info for pod", - zap.Any("ipInfo", info), + zap.Any("ipInfo", response.PodIPInfo[i]), zap.Any("podInfo", podInfo)) //nolint:exhaustive // ignore exhaustive types check @@ -196,6 +196,11 @@ func (invoker *CNSIPAMInvoker) Add(addConfig IPAMAddConfig) (IPAMAddResult, erro if err := addBackendNICToResult(&info, &addResult, key); err != nil { return IPAMAddResult{}, err } + case cns.ApipaNIC: + if err := configureApipaAddResult(&addResult, &response.PodIPInfo[i], key); err != nil { + return IPAMAddResult{}, err + } + case cns.InfraNIC, "": // if we change from legacy cns, the nicType will be empty, so we assume it is infra nic info.nicType = cns.InfraNIC @@ -556,6 +561,32 @@ func BuildIPConfigForV6(secondaryIPs map[string]cns.SecondaryIPConfig, gatewayIP return network.IPConfig{}, fmt.Errorf("map is empty") } +func configureApipaAddResult(addResult *IPAMAddResult, info *cns.PodIpInfo, key string) error { + ip, ipnet, err := info.PodIPConfig.GetIPNet() + if ip == nil { + return errors.Wrap(err, "GetIPNet failed while configuring apipa AddResult") + } + + addResult.interfaceInfo[key] = network.InterfaceInfo{ + IPConfigs: []*network.IPConfig{ + { + Address: net.IPNet{ + IP: ip, + Mask: ipnet.Mask, + }, + Gateway: net.ParseIP(info.NetworkContainerPrimaryIPConfig.GatewayIPAddress), + }, + }, + NICType: info.NICType, + SkipDefaultRoutes: true, + NetworkContainerID: info.NetworkContainerID, + AllowHostToNCCommunication: info.AllowHostToNCCommunication, + 
AllowNCToHostCommunication: info.AllowNCToHostCommunication, + } + + return nil +} + func addBackendNICToResult(info *IPResultInfo, addResult *IPAMAddResult, key string) error { macAddress, err := net.ParseMAC(info.macAddress) if err != nil { diff --git a/cni/network/invoker_cns_test.go b/cni/network/invoker_cns_test.go index b28798cc28..311a231845 100644 --- a/cni/network/invoker_cns_test.go +++ b/cni/network/invoker_cns_test.go @@ -2298,3 +2298,315 @@ func TestMultipleIBNICsToResult(t *testing.T) { }) } } + +func TestCNSIPAMInvoker_Add_ApipaNIC(t *testing.T) { + require := require.New(t) + + type fields struct { + podName string + podNamespace string + cnsClient cnsclient + ipamMode util.IpamMode + } + type args struct { + nwCfg *cni.NetworkConfig + args *cniSkel.CmdArgs + options map[string]interface{} + } + + tests := []struct { + name string + fields fields + args args + wantApipaResult network.InterfaceInfo + wantErr bool + wantErrMsg string + }{ + { + name: "Test CNI Add with InfraNIC + ApipaNIC", + fields: fields{ + podName: testPodInfo.PodName, + podNamespace: testPodInfo.PodNamespace, + cnsClient: &MockCNSClient{ + require: require, + requestIPs: requestIPsHandler{ + ipconfigArgument: cns.IPConfigsRequest{ + PodInterfaceID: "testcont-testifname", + InfraContainerID: "testcontainerid", + OrchestratorContext: marshallPodInfo(testPodInfo), + }, + result: &cns.IPConfigsResponse{ + PodIPInfo: []cns.PodIpInfo{ + { + PodIPConfig: cns.IPSubnet{ + IPAddress: "10.0.1.10", + PrefixLength: 24, + }, + NetworkContainerPrimaryIPConfig: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: "10.0.1.0", + PrefixLength: 24, + }, + GatewayIPAddress: "10.0.1.1", + }, + HostPrimaryIPInfo: cns.HostIPInfo{ + Gateway: "10.0.0.1", + PrimaryIP: "10.0.0.1", + Subnet: "10.0.0.0/24", + }, + NICType: cns.InfraNIC, + }, + { + PodIPConfig: cns.IPSubnet{ + IPAddress: "169.254.128.10", + PrefixLength: 17, + }, + NetworkContainerPrimaryIPConfig: cns.IPConfiguration{ + 
GatewayIPAddress: "169.254.128.1", + }, + NICType: cns.ApipaNIC, + NetworkContainerID: "test-nc-id", + AllowHostToNCCommunication: true, + AllowNCToHostCommunication: false, + }, + }, + Response: cns.Response{ + ReturnCode: 0, + Message: "", + }, + }, + err: nil, + }, + }, + }, + args: args{ + nwCfg: &cni.NetworkConfig{}, + args: &cniSkel.CmdArgs{ + ContainerID: "testcontainerid", + Netns: "testnetns", + IfName: "testifname", + }, + options: map[string]interface{}{}, + }, + wantApipaResult: network.InterfaceInfo{ + IPConfigs: []*network.IPConfig{ + { + Address: net.IPNet{ + IP: net.ParseIP("169.254.128.10"), + Mask: net.CIDRMask(17, 32), + }, + Gateway: net.ParseIP("169.254.128.1"), + }, + }, + NICType: cns.ApipaNIC, + SkipDefaultRoutes: true, + NetworkContainerID: "test-nc-id", + AllowHostToNCCommunication: true, + AllowNCToHostCommunication: false, + }, + wantErr: false, + }, + { + name: "Test CNI add with Frontend Nic + ApipaNIC", + fields: fields{ + podName: testPodInfo.PodName, + podNamespace: testPodInfo.PodNamespace, + cnsClient: &MockCNSClient{ + require: require, + requestIPs: requestIPsHandler{ + ipconfigArgument: cns.IPConfigsRequest{ + PodInterfaceID: "testcont-testifname", + InfraContainerID: "testcontainerid", + OrchestratorContext: marshallPodInfo(testPodInfo), + }, + result: &cns.IPConfigsResponse{ + PodIPInfo: []cns.PodIpInfo{ + { + PodIPConfig: cns.IPSubnet{ + IPAddress: "10.0.1.10", + PrefixLength: 24, + }, + NetworkContainerPrimaryIPConfig: cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: "10.0.1.0", + PrefixLength: 24, + }, + GatewayIPAddress: "10.0.1.1", + }, + HostPrimaryIPInfo: cns.HostIPInfo{ + Gateway: "10.0.0.1", + PrimaryIP: "10.0.0.1", + Subnet: "10.0.0.0/24", + }, + MacAddress: "bc:9a:78:56:34:12", + NICType: cns.NodeNetworkInterfaceFrontendNIC, + }, + { + PodIPConfig: cns.IPSubnet{ + IPAddress: "169.254.5.50", + PrefixLength: 16, + }, + NetworkContainerPrimaryIPConfig: cns.IPConfiguration{ + GatewayIPAddress: "169.254.5.1", 
+ }, + NICType: cns.ApipaNIC, + NetworkContainerID: "mixed-nc-id", + AllowHostToNCCommunication: true, + AllowNCToHostCommunication: false, + }, + }, + Response: cns.Response{ + ReturnCode: 0, + Message: "", + }, + }, + err: nil, + }, + }, + }, + args: args{ + nwCfg: &cni.NetworkConfig{}, + args: &cniSkel.CmdArgs{ + ContainerID: "testcontainerid", + Netns: "testnetns", + IfName: "testifname", + }, + options: map[string]interface{}{}, + }, + wantApipaResult: network.InterfaceInfo{ + IPConfigs: []*network.IPConfig{ + { + Address: net.IPNet{ + IP: net.ParseIP("169.254.5.50"), + Mask: net.CIDRMask(16, 32), + }, + Gateway: net.ParseIP("169.254.5.1"), + }, + }, + NICType: cns.ApipaNIC, + SkipDefaultRoutes: true, + NetworkContainerID: "mixed-nc-id", + AllowHostToNCCommunication: true, + AllowNCToHostCommunication: false, + }, + wantErr: false, + }, + { + name: "Test CNI add with ApipaNIC fails when GetIPNet fails", + fields: fields{ + podName: testPodInfo.PodName, + podNamespace: testPodInfo.PodNamespace, + cnsClient: &MockCNSClient{ + require: require, + requestIPs: requestIPsHandler{ + ipconfigArgument: cns.IPConfigsRequest{ + PodInterfaceID: "testcont-testifname", + InfraContainerID: "testcontainerid", + OrchestratorContext: marshallPodInfo(testPodInfo), + }, + result: &cns.IPConfigsResponse{ + PodIPInfo: []cns.PodIpInfo{ + { + PodIPConfig: cns.IPSubnet{ + IPAddress: "invalid-ip-address", + PrefixLength: 16, + }, + NetworkContainerPrimaryIPConfig: cns.IPConfiguration{ + GatewayIPAddress: "169.254.1.1", + }, + NICType: cns.ApipaNIC, + NetworkContainerID: "failed-nc-id", + AllowHostToNCCommunication: false, + AllowNCToHostCommunication: false, + }, + }, + Response: cns.Response{ + ReturnCode: 0, + Message: "", + }, + }, + err: nil, + }, + }, + }, + args: args{ + nwCfg: &cni.NetworkConfig{}, + args: &cniSkel.CmdArgs{ + ContainerID: "testcontainerid", + Netns: "testnetns", + IfName: "testifname", + }, + options: map[string]interface{}{}, + }, + wantErr: true, + }, + } + + 
for _, tt := range tests { + t.Run(tt.name, func(_ *testing.T) { + invoker := &CNSIPAMInvoker{ + podName: tt.fields.podName, + podNamespace: tt.fields.podNamespace, + cnsClient: tt.fields.cnsClient, + } + if tt.fields.ipamMode != "" { + invoker.ipamMode = tt.fields.ipamMode + } + + ipamAddResult, err := invoker.Add(IPAMAddConfig{ + nwCfg: tt.args.nwCfg, + args: tt.args.args, + options: tt.args.options, + }) + + if tt.wantErr { + require.Error(err) + return + } + + require.NoError(err) + + // Find the ApipaNIC interface in the result + var apipaInterfaceFound bool + var actualApipaResult network.InterfaceInfo + + for _, ifInfo := range ipamAddResult.interfaceInfo { + if ifInfo.NICType == cns.ApipaNIC { + apipaInterfaceFound = true + actualApipaResult = ifInfo + break + } + } + + require.True(apipaInterfaceFound, "ApipaNIC interface should be found in the result") + + // Verify the ApipaNIC interface info + // Lines around 2586-2590 should be: + require.Equal(string(tt.wantApipaResult.NICType), string(actualApipaResult.NICType), "NICType should match expected value") + require.Equal(tt.wantApipaResult.SkipDefaultRoutes, actualApipaResult.SkipDefaultRoutes) + require.Equal(tt.wantApipaResult.NetworkContainerID, actualApipaResult.NetworkContainerID) + require.Equal(tt.wantApipaResult.AllowHostToNCCommunication, actualApipaResult.AllowHostToNCCommunication) + require.Equal(tt.wantApipaResult.AllowNCToHostCommunication, actualApipaResult.AllowNCToHostCommunication) + + // Verify IP configs + require.Len(actualApipaResult.IPConfigs, 1, "Should have exactly one IP config for ApipaNIC") + actualIPConfig := actualApipaResult.IPConfigs[0] + expectedIPConfig := tt.wantApipaResult.IPConfigs[0] + + require.True(actualIPConfig.Address.IP.Equal(expectedIPConfig.Address.IP), + "IP addresses should match: expected %s, got %s", + expectedIPConfig.Address.IP, actualIPConfig.Address.IP) + require.Equal(expectedIPConfig.Address.Mask, actualIPConfig.Address.Mask, + "IP masks should 
match") + + if expectedIPConfig.Gateway != nil { + require.NotNil(actualIPConfig.Gateway, "Gateway should not be nil") + require.True(actualIPConfig.Gateway.Equal(expectedIPConfig.Gateway), + "Gateway IPs should match: expected %s, got %s", + expectedIPConfig.Gateway, actualIPConfig.Gateway) + } else { + require.Nil(actualIPConfig.Gateway, "Gateway should be nil") + } + }) + } +} diff --git a/cni/network/network.go b/cni/network/network.go index 1ec1666f45..a3fa3784d9 100644 --- a/cni/network/network.go +++ b/cni/network/network.go @@ -52,6 +52,7 @@ const ( ipv4FullMask = 32 ipv6FullMask = 128 ibInterfacePrefix = "ib" + apipaInterfacePrefix = "apipa" ) // CNI Operation Types @@ -643,6 +644,8 @@ func (plugin *NetPlugin) findMasterInterface(opt *createEpInfoOpt) string { // when the VF is dismounted, this interface will go away // return an unique interface name to containerd return ibInterfacePrefix + strconv.Itoa(opt.endpointIndex) + case cns.ApipaNIC: + return apipaInterfacePrefix + strconv.Itoa(opt.endpointIndex) default: return "" } @@ -757,8 +760,11 @@ func (plugin *NetPlugin) createEpInfo(opt *createEpInfoOpt) (*network.EndpointIn IPAddresses: addresses, MacAddress: opt.ifInfo.MacAddress, // the following is used for creating an external interface if we can't find an existing network - HostSubnetPrefix: opt.ifInfo.HostSubnetPrefix.String(), - PnPID: opt.ifInfo.PnPID, + HostSubnetPrefix: opt.ifInfo.HostSubnetPrefix.String(), + PnPID: opt.ifInfo.PnPID, + NetworkContainerID: opt.ifInfo.NetworkContainerID, + AllowInboundFromHostToNC: opt.ifInfo.AllowHostToNCCommunication, + AllowInboundFromNCToHost: opt.ifInfo.AllowNCToHostCommunication, } if err = addSubnetToEndpointInfo(*opt.ifInfo, &endpointInfo); err != nil { @@ -1072,7 +1078,8 @@ func (plugin *NetPlugin) Delete(args *cniSkel.CmdArgs) error { epInfos, err = plugin.nm.GetEndpointState(networkID, args.ContainerID) // if stateless CNI fail to get the endpoint from CNS for any reason other than Endpoint Not found 
if err != nil { - if errors.Is(err, network.ErrConnectionFailure) { + // async delete should be disabled for standalone scenario + if errors.Is(err, network.ErrConnectionFailure) && !nwCfg.DisableAsyncDelete { logger.Info("failed to connect to CNS", zap.String("containerID", args.ContainerID), zap.Error(err)) addErr := fsnotify.AddFile(args.ContainerID, args.ContainerID, watcherPath) logger.Info("add containerid file for Asynch delete", zap.String("containerID", args.ContainerID), zap.Error(addErr)) @@ -1152,10 +1159,10 @@ func (plugin *NetPlugin) Delete(args *cniSkel.CmdArgs) error { } } } - logger.Info("Deleting the state from the cni statefile") + logger.Info("Deleting endpoint state from statefile") err = plugin.nm.DeleteState(epInfos) if err != nil { - return plugin.RetriableError(fmt.Errorf("failed to save state: %w", err)) + return plugin.RetriableError(fmt.Errorf("failed to delete state: %w", err)) } return err diff --git a/cni/network/network_windows.go b/cni/network/network_windows.go index f7d2e5defb..255fad470f 100644 --- a/cni/network/network_windows.go +++ b/cni/network/network_windows.go @@ -75,10 +75,13 @@ func (plugin *NetPlugin) getNetworkName(netNs string, interfaceInfo *network.Int // Swiftv2 L1VH Network Name swiftv2NetworkNamePrefix := "azure-" if interfaceInfo != nil && (interfaceInfo.NICType == cns.NodeNetworkInterfaceFrontendNIC || interfaceInfo.NICType == cns.BackendNIC) { - logger.Info("swiftv2", zap.String("network name", interfaceInfo.MacAddress.String())) return swiftv2NetworkNamePrefix + interfaceInfo.MacAddress.String(), nil } + if interfaceInfo != nil && interfaceInfo.NICType == cns.ApipaNIC { + return swiftv2NetworkNamePrefix + apipaInterfacePrefix, nil + } + // For singletenancy, the network name is simply the nwCfg.Name if !nwCfg.MultiTenancy { return nwCfg.Name, nil diff --git a/cni/network/network_windows_test.go b/cni/network/network_windows_test.go index 8c47d739ca..bcf3199e31 100644 --- a/cni/network/network_windows_test.go 
+++ b/cni/network/network_windows_test.go @@ -661,6 +661,45 @@ func TestGetNetworkNameSwiftv2FromCNS(t *testing.T) { want: "", wantErr: false, }, + { + name: "Get Network Name from CNS for swiftv2 ApipaNIC", + plugin: &NetPlugin{ + Plugin: plugin, + nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), + }, + netNs: "azure", + nwCfg: &cni.NetworkConfig{ + CNIVersion: "0.3.0", + MultiTenancy: false, + }, + interfaceInfo: &network.InterfaceInfo{ + Name: "apipa-interface", + MacAddress: parsedMacAddress, + NICType: cns.ApipaNIC, + }, + want: swiftv2NetworkNamePrefix + "apipa", // "azure-apipa" + wantErr: false, + }, + { + name: "Get Network Name from CNS for swiftv2 ApipaNIC with empty MacAddress", + plugin: &NetPlugin{ + Plugin: plugin, + nm: network.NewMockNetworkmanager(network.NewMockEndpointClient(nil)), + ipamInvoker: NewMockIpamInvoker(false, false, false, false, false), + }, + netNs: "azure", + nwCfg: &cni.NetworkConfig{ + CNIVersion: "0.3.0", + MultiTenancy: false, + }, + interfaceInfo: &network.InterfaceInfo{ + Name: "apipa-test-interface", + NICType: cns.ApipaNIC, + }, + want: swiftv2NetworkNamePrefix + "apipa", // "azure-apipa" + wantErr: false, + }, } for _, tt := range tests { diff --git a/cns/Dockerfile b/cns/Dockerfile index 7908371aea..36fd15919a 100644 --- a/cns/Dockerfile +++ b/cns/Dockerfile @@ -38,4 +38,4 @@ FROM --platform=windows/${ARCH} mcr.microsoft.com/oss/kubernetes/windows-host-pr FROM hpc as windows COPY --from=builder /go/bin/azure-cns /azure-cns.exe ENTRYPOINT ["azure-cns.exe"] -EXPOSE 10090 +EXPOSE 10090 \ No newline at end of file diff --git a/cns/NetworkContainerContract.go b/cns/NetworkContainerContract.go index 3bbacb6558..d4f4472a40 100644 --- a/cns/NetworkContainerContract.go +++ b/cns/NetworkContainerContract.go @@ -93,6 +93,9 @@ const ( NodeNetworkInterfaceFrontendNIC NICType = "FrontendNIC" // NodeNetworkInterfaceBackendNIC is the new name 
for BackendNIC NodeNetworkInterfaceBackendNIC NICType = "BackendNIC" + + // ApipaNIC is used for internal communication between host and container + ApipaNIC NICType = "ApipaNIC" ) // ChannelMode :- CNS channel modes @@ -521,6 +524,12 @@ type PodIpInfo struct { PnPID string // Default Deny ACL's to configure on HNS endpoints for Swiftv2 window nodes EndpointPolicies []policy.Policy + // This flag is in effect only if nic type is apipa. This allows connection originating from host to container via apipa nic and not other way. + AllowHostToNCCommunication bool + // This flag is in effect only if nic type is apipa. This allows connection originating from container to host via apipa nic and not other way. + AllowNCToHostCommunication bool + // NetworkContainerID is the ID of the network container to which this Pod IP belongs + NetworkContainerID string } type HostIPInfo struct { diff --git a/cns/client/client.go b/cns/client/client.go index 1021d6f412..7ef711f1cb 100644 --- a/cns/client/client.go +++ b/cns/client/client.go @@ -1100,3 +1100,37 @@ func (c *Client) UpdateEndpoint(ctx context.Context, endpointID string, ipInfo m return &response, nil } + +// DeleteEndpointState calls the DeleteEndpointHandler API in CNS to delete the state of a given EndpointID(containerID) +// This api is called for swiftv2 standalone scenario to cleanup state in CNS +func (c *Client) DeleteEndpointState(ctx context.Context, endpointID string) (*cns.Response, error) { + // build the request + u := c.routes[cns.EndpointAPI] + uString := u.String() + endpointID + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, uString, http.NoBody) + if err != nil { + return nil, errors.Wrap(err, "failed to build request") + } + req.Header.Set(headerContentType, contentTypeJSON) + res, err := c.client.Do(req) + if err != nil { + return nil, &ConnectionFailureErr{cause: err} + } + + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return nil, errors.Errorf("http response %d", 
res.StatusCode) + } + + var response cns.Response + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + return nil, errors.Wrap(err, "failed to decode CNS Response") + } + + if response.ReturnCode != 0 { + return nil, errors.New(response.Message) + } + + return &response, nil +} diff --git a/cns/restserver/ipam.go b/cns/restserver/ipam.go index cbc663e9e9..f54e4b4189 100644 --- a/cns/restserver/ipam.go +++ b/cns/restserver/ipam.go @@ -150,6 +150,7 @@ func (service *HTTPRestService) requestIPConfigHandlerHelperStandalone(ctx conte // assign NICType and MAC Address for SwiftV2. we assume that there won't be any SwiftV1 NCs here podIPInfoList := make([]cns.PodIpInfo, 0, len(resp)) + apipaIndex := -1 for i := range resp { podIPInfo := cns.PodIpInfo{ PodIPConfig: resp[i].IPConfiguration.IPSubnet, @@ -157,8 +158,25 @@ func (service *HTTPRestService) requestIPConfigHandlerHelperStandalone(ctx conte NICType: resp[i].NetworkInterfaceInfo.NICType, NetworkContainerPrimaryIPConfig: resp[i].IPConfiguration, SecondaryIPConfigs: resp[i].SecondaryIPConfigs, + NetworkContainerID: resp[i].NetworkContainerID, } podIPInfoList = append(podIPInfoList, podIPInfo) + if resp[i].AllowHostToNCCommunication || resp[i].AllowNCToHostCommunication { + apipaIndex = i + } + } + + if apipaIndex != -1 { + apipaPodIPInfo := cns.PodIpInfo{ + PodIPConfig: resp[apipaIndex].LocalIPConfiguration.IPSubnet, + NICType: cns.ApipaNIC, + NetworkContainerPrimaryIPConfig: resp[apipaIndex].LocalIPConfiguration, + SkipDefaultRoutes: true, + AllowHostToNCCommunication: resp[apipaIndex].AllowHostToNCCommunication, + AllowNCToHostCommunication: resp[apipaIndex].AllowNCToHostCommunication, + NetworkContainerID: resp[apipaIndex].NetworkContainerID, + } + podIPInfoList = append(podIPInfoList, apipaPodIPInfo) } ipConfigsResp := &cns.IPConfigsResponse{ @@ -1152,9 +1170,75 @@ func (service *HTTPRestService) EndpointHandlerAPI(w http.ResponseWriter, r *htt service.GetEndpointHandler(w, r) case 
http.MethodPatch: service.UpdateEndpointHandler(w, r) + case http.MethodDelete: + service.DeleteEndpointStateHandler(w, r) default: - logger.Errorf("[EndpointHandlerAPI] EndpointHandler API expect http Get or Patch method") + //nolint + logger.Errorf("[EndpointHandlerAPI] EndpointHandler API expect http Get or Patch or Delete method") + } +} + +func (service *HTTPRestService) DeleteEndpointStateHandler(w http.ResponseWriter, r *http.Request) { + opName := "DeleteEndpointStateHandler" + logger.Printf("[DeleteEndpointStateHandler] DeleteEndpointState for %s", r.URL.Path) //nolint:staticcheck // reason: using deprecated call until migration to new API + endpointID := strings.TrimPrefix(r.URL.Path, cns.EndpointPath) + + if service.EndpointStateStore == nil { + response := cns.Response{ + ReturnCode: types.NilEndpointStateStore, + Message: "[DeleteEndpointStateHandler] EndpointStateStore is not initialized", + } + err := common.Encode(w, &response) + logger.Response(opName, response, response.ReturnCode, err) //nolint:staticcheck // reason: using deprecated call until migration to new API + return + } + + // Delete the endpoint from state + err := service.DeleteEndpointStateHelper(endpointID) + if err != nil { + response := cns.Response{ + ReturnCode: types.UnexpectedError, + Message: fmt.Sprintf("[DeleteEndpointStateHandler] Failed to delete endpoint state for %s with error: %s", endpointID, err.Error()), + } + + if errors.Is(err, ErrEndpointStateNotFound) { + response.ReturnCode = types.NotFound + } + + err = common.Encode(w, &response) + logger.Response(opName, response, response.ReturnCode, err) //nolint:staticcheck // reason: using deprecated call until migration to new API + return + } + + response := cns.Response{ + ReturnCode: types.Success, + Message: "[DeleteEndpointStateHandler] Endpoint state deleted successfully", } + err = common.Encode(w, &response) + logger.Response(opName, response, response.ReturnCode, err) //nolint:staticcheck // reason: using 
deprecated call until migration to new API +} + +func (service *HTTPRestService) DeleteEndpointStateHelper(endpointID string) error { + if service.EndpointStateStore == nil { + return ErrStoreEmpty + } + logger.Printf("[deleteEndpointState] Deleting Endpoint state from state file %s", endpointID) //nolint:staticcheck // reason: using deprecated call until migration to new API + _, endpointExist := service.EndpointState[endpointID] + if !endpointExist { + logger.Printf("[deleteEndpointState] endpoint could not be found in the statefile %s", endpointID) //nolint:staticcheck // reason: using deprecated call until migration to new API + return fmt.Errorf("[deleteEndpointState] endpoint %s: %w", endpointID, ErrEndpointStateNotFound) + } + + // Delete the endpoint from the state + delete(service.EndpointState, endpointID) + + // Write the updated state back to the store + err := service.EndpointStateStore.Write(EndpointStoreKey, service.EndpointState) + if err != nil { + return fmt.Errorf("[deleteEndpointState] failed to write endpoint state to store: %w", err) + } + logger.Printf("[deleteEndpointState] successfully deleted endpoint %s from state file", endpointID) //nolint:staticcheck // reason: using deprecated call until migration to new API + return nil } // GetEndpointHandler handles the incoming GetEndpoint requests with http Get method @@ -1342,6 +1426,11 @@ func updateIPInfoMap(iPInfo map[string]*IPInfo, interfaceInfo *IPInfo, ifName, e iPInfo[ifName].MacAddress = interfaceInfo.MacAddress logger.Printf("[updateEndpoint] update the endpoint %s with MacAddress %s", endpointID, interfaceInfo.MacAddress) } + + if interfaceInfo.NetworkContainerID != "" { + iPInfo[ifName].NetworkContainerID = interfaceInfo.NetworkContainerID + logger.Printf("[updateEndpoint] update the endpoint %s with NetworkContainerID %s", endpointID, interfaceInfo.NetworkContainerID) //nolint + } } // verifyUpdateEndpointStateRequest verify the CNI request body for the UpdateENdpointState API diff 
--git a/cns/restserver/ipam_test.go b/cns/restserver/ipam_test.go index dc582a0e8d..b126a04d30 100644 --- a/cns/restserver/ipam_test.go +++ b/cns/restserver/ipam_test.go @@ -2443,11 +2443,12 @@ func TestStatelessCNIStateFile(t *testing.T) { endpointInfo2ContainerID := "1b4917617e15d24dc495e407d8eb5c88e4406e58fa209e4eb75a2c2fb7045eea" endpointInfo2 := &EndpointInfo{IfnameToIPMap: make(map[string]*IPInfo)} endpointInfo2.IfnameToIPMap["eth2"] = &IPInfo{ - IPv4: nil, - NICType: cns.DelegatedVMNIC, - HnsEndpointID: "5c15cccc-830a-4dff-81f3-4b1e55cb7dcb", - HnsNetworkID: "5c0712cd-824c-4898-b1c0-2fcb16ede4fb", - MacAddress: "7c:1e:52:06:d3:4b", + IPv4: nil, + NICType: cns.DelegatedVMNIC, + HnsEndpointID: "5c15cccc-830a-4dff-81f3-4b1e55cb7dcb", + HnsNetworkID: "5c0712cd-824c-4898-b1c0-2fcb16ede4fb", + MacAddress: "7c:1e:52:06:d3:4b", + NetworkContainerID: testNCID, } // test cases tests := []struct { diff --git a/cns/restserver/restserver.go b/cns/restserver/restserver.go index 84589292ce..f14c69d12c 100644 --- a/cns/restserver/restserver.go +++ b/cns/restserver/restserver.go @@ -126,13 +126,14 @@ type EndpointInfo struct { } type IPInfo struct { - IPv4 []net.IPNet - IPv6 []net.IPNet `json:",omitempty"` - HnsEndpointID string `json:",omitempty"` - HnsNetworkID string `json:",omitempty"` - HostVethName string `json:",omitempty"` - MacAddress string `json:",omitempty"` - NICType cns.NICType + IPv4 []net.IPNet + IPv6 []net.IPNet `json:",omitempty"` + HnsEndpointID string `json:",omitempty"` + HnsNetworkID string `json:",omitempty"` + HostVethName string `json:",omitempty"` + MacAddress string `json:",omitempty"` + NetworkContainerID string `json:",omitempty"` + NICType cns.NICType } type GetHTTPServiceDataResponse struct { diff --git a/network/endpoint.go b/network/endpoint.go index bd9fa7fc9b..08f2e81f6c 100644 --- a/network/endpoint.go +++ b/network/endpoint.go @@ -139,6 +139,10 @@ type InterfaceInfo struct { NCResponse *cns.GetNetworkContainerResponse PnPID string 
EndpointPolicies []policy.Policy + // these fields will be required for swiftv2 apipa nic + NetworkContainerID string + AllowNCToHostCommunication bool + AllowHostToNCCommunication bool } type IPConfig struct { @@ -167,10 +171,10 @@ func FormatSliceOfPointersToString[T any](slice []*T) string { func (epInfo *EndpointInfo) PrettyString() string { return fmt.Sprintf("EndpointID:%s ContainerID:%s NetNsPath:%s IfName:%s IfIndex:%d MacAddr:%s IPAddrs:%v Gateways:%v Data:%+v NICType: %s "+ - "NetworkContainerID: %s HostIfName: %s NetNs: %s Options: %v MasterIfName: %s HNSEndpointID: %s HNSNetworkID: %s", + "NetworkContainerID: %s HostIfName: %s NetNs: %s Options: %v MasterIfName: %s HNSEndpointID: %s HNSNetworkID: %s AllowHostToNC:%t AllowNCToHost:%t", epInfo.EndpointID, epInfo.ContainerID, epInfo.NetNsPath, epInfo.IfName, epInfo.IfIndex, epInfo.MacAddress.String(), epInfo.IPAddresses, epInfo.Gateways, epInfo.Data, epInfo.NICType, epInfo.NetworkContainerID, epInfo.HostIfName, epInfo.NetNs, epInfo.Options, epInfo.MasterIfName, - epInfo.HNSEndpointID, epInfo.HNSNetworkID) + epInfo.HNSEndpointID, epInfo.HNSNetworkID, epInfo.AllowInboundFromHostToNC, epInfo.AllowInboundFromNCToHost) } func (ifInfo *InterfaceInfo) PrettyString() string { diff --git a/network/endpoint_windows.go b/network/endpoint_windows.go index 52ef0eff5d..7b842dfe32 100644 --- a/network/endpoint_windows.go +++ b/network/endpoint_windows.go @@ -157,12 +157,17 @@ func (nw *network) newEndpointImpl( return nw.getEndpointWithVFDevice(plc, epInfo) } + if epInfo.NICType == cns.ApipaNIC { + return nw.createHostNCApipaEndpoint(cli, epInfo) + } + if useHnsV2, err := UseHnsV2(epInfo.NetNsPath); useHnsV2 { if err != nil { return nil, err } return nw.newEndpointImplHnsV2(cli, epInfo) + } return nw.newEndpointImplHnsV1(epInfo, plc) @@ -354,14 +359,16 @@ func (nw *network) configureHcnEndpoint(epInfo *EndpointInfo) (*hcn.HostComputeE return hcnEndpoint, nil } +func getApipaEndpointName(networkContainerID string) string 
{ + endpointName := fmt.Sprintf("%s-%s", hostNCApipaEndpointNamePrefix, networkContainerID) + return endpointName +} + func (nw *network) deleteHostNCApipaEndpoint(networkContainerID string) error { // TODO: this code is duplicated in cns/hnsclient, but that code has logging messages that require a CNSLogger, // which makes is hard to use in this package. We should refactor this into a common package with no logging deps // so it can be called in both places - - // HostNCApipaEndpoint name is derived from NC ID - endpointName := fmt.Sprintf("%s-%s", hostNCApipaEndpointNamePrefix, networkContainerID) - logger.Info("Deleting HostNCApipaEndpoint for NC", zap.String("endpointName", endpointName), zap.String("networkContainerID", networkContainerID)) + endpointName := getApipaEndpointName(networkContainerID) // Check if the endpoint exists endpoint, err := Hnsv2.GetEndpointByName(endpointName) @@ -376,6 +383,7 @@ func (nw *network) deleteHostNCApipaEndpoint(networkContainerID string) error { return nil } + logger.Info("Deleting Apipa Endpoint", zap.String("endpointName", endpointName)) if err := Hnsv2.DeleteEndpoint(endpoint); err != nil { return fmt.Errorf("failed to delete HostNCApipa endpoint: %+v: %w", endpoint, err) } @@ -387,7 +395,7 @@ func (nw *network) deleteHostNCApipaEndpoint(networkContainerID string) error { // createHostNCApipaEndpoint creates a new endpoint in the HostNCApipaNetwork // for host container connectivity -func (nw *network) createHostNCApipaEndpoint(cli apipaClient, epInfo *EndpointInfo) error { +func (nw *network) createHostNCApipaEndpoint(cli apipaClient, epInfo *EndpointInfo) (*endpoint, error) { var ( err error hostNCApipaEndpointID string @@ -395,7 +403,8 @@ func (nw *network) createHostNCApipaEndpoint(cli apipaClient, epInfo *EndpointIn ) if namespace, err = hcn.GetNamespaceByID(epInfo.NetNsPath); err != nil { - return fmt.Errorf("Failed to retrieve namespace with GetNamespaceByID for NetNsPath: %s"+ + //nolint + return nil, 
fmt.Errorf("Failed to retrieve namespace with GetNamespaceByID for NetNsPath: %s"+ " due to error: %v", epInfo.NetNsPath, err) } @@ -403,7 +412,8 @@ func (nw *network) createHostNCApipaEndpoint(cli apipaClient, epInfo *EndpointIn zap.String("NetworkContainerID", epInfo.NetworkContainerID)) if hostNCApipaEndpointID, err = cli.CreateHostNCApipaEndpoint(context.TODO(), epInfo.NetworkContainerID); err != nil { - return err + //nolint + return nil, err } defer func() { @@ -413,10 +423,20 @@ func (nw *network) createHostNCApipaEndpoint(cli apipaClient, epInfo *EndpointIn }() if err = hcn.AddNamespaceEndpoint(namespace.Id, hostNCApipaEndpointID); err != nil { - return fmt.Errorf("Failed to add HostNCApipaEndpoint: %s to namespace: %s due to error: %v", hostNCApipaEndpointID, namespace.Id, err) //nolint + //nolint + return nil, fmt.Errorf("Failed to add HostNCApipaEndpoint: %s to namespace: %s due to error: %v", hostNCApipaEndpointID, namespace.Id, err) } - return nil + ep := &endpoint{ + Id: getApipaEndpointName(epInfo.NetworkContainerID), + HnsId: hostNCApipaEndpointID, + IfName: getApipaEndpointName(epInfo.NetworkContainerID), + ContainerID: epInfo.ContainerID, + NICType: cns.ApipaNIC, + NetworkContainerID: epInfo.NetworkContainerID, + } + + return ep, nil } // newEndpointImplHnsV2 creates a new endpoint in the network using Hnsv2 @@ -464,7 +484,7 @@ func (nw *network) newEndpointImplHnsV2(cli apipaClient, epInfo *EndpointInfo) ( // If the Host - container connectivity is requested, create endpoint in HostNCApipaNetwork if epInfo.AllowInboundFromHostToNC || epInfo.AllowInboundFromNCToHost { - if err = nw.createHostNCApipaEndpoint(cli, epInfo); err != nil { + if _, err = nw.createHostNCApipaEndpoint(cli, epInfo); err != nil { return nil, fmt.Errorf("Failed to create HostNCApipaEndpoint due to error: %v", err) } } @@ -529,6 +549,10 @@ func (nw *network) deleteEndpointImpl(_ netlink.NetlinkInterface, _ platform.Exe return nil } + if ep.NICType == cns.ApipaNIC { + return 
nw.deleteHostNCApipaEndpoint(ep.NetworkContainerID) + } + if ep.HnsId == "" { logger.Info("No HNS id found. Skip endpoint deletion", zap.Any("nicType", ep.NICType), zap.String("containerId", ep.ContainerID)) return nil diff --git a/network/endpoint_windows_test.go b/network/endpoint_windows_test.go index 1dfb414bbc..a65add6215 100644 --- a/network/endpoint_windows_test.go +++ b/network/endpoint_windows_test.go @@ -666,15 +666,15 @@ func TestDeleteEndpointStateForInfraDelegatedNIC(t *testing.T) { HNSNetworkID: networkID, } - // mock DeleteEndpointState() to make sure endpoint and network is deleted from cache + // mock DeleteEndpointStateless() to make sure endpoint and network is deleted from cache // network and endpoint should be deleted from cache for delegatedNIC - err = nm.DeleteEndpointState(networkID, delegatedEpInfo) + err = nm.DeleteEndpointStateless(networkID, delegatedEpInfo) if err != nil { t.Fatalf("Failed to delete endpoint for delegatedNIC state due to %v", err) } // endpoint should be deleted from cache for delegatedNIC and network is still there - err = nm.DeleteEndpointState(infraNetworkID, infraEpInfo) + err = nm.DeleteEndpointStateless(infraNetworkID, infraEpInfo) if err != nil { t.Fatalf("Failed to delete endpoint for delegatedNIC state due to %v", err) } diff --git a/network/manager.go b/network/manager.go index 7bc1441fea..bef8858087 100644 --- a/network/manager.go +++ b/network/manager.go @@ -424,7 +424,8 @@ func (nm *networkManager) UpdateEndpointState(eps []*endpoint) error { ifnameToIPInfoMap := generateCNSIPInfoMap(eps) // key : interface name, value : IPInfo for key, ipinfo := range ifnameToIPInfoMap { logger.Info("Update endpoint state", zap.String("ifname", key), zap.String("hnsEndpointID", ipinfo.HnsEndpointID), zap.String("hnsNetworkID", ipinfo.HnsNetworkID), - zap.String("hostVethName", ipinfo.HostVethName), zap.String("macAddress", ipinfo.MacAddress), zap.String("nicType", string(ipinfo.NICType))) + zap.String("hostVethName", 
ipinfo.HostVethName), zap.String("macAddress", ipinfo.MacAddress), zap.String("nicType", string(ipinfo.NICType)), + zap.String("networkContainerID", ipinfo.NetworkContainerID)) } // we assume all endpoints have the same container id cnsEndpointID := eps[0].ContainerID @@ -491,7 +492,7 @@ func (nm *networkManager) DeleteEndpoint(networkID, endpointID string, epInfo *E if nm.IsStatelessCNIMode() { // Calls deleteEndpointImpl directly, skipping the get network check; does not call cns - return nm.DeleteEndpointState(networkID, epInfo) + return nm.DeleteEndpointStateless(networkID, epInfo) } nw, err := nm.getNetwork(networkID) @@ -507,7 +508,7 @@ func (nm *networkManager) DeleteEndpoint(networkID, endpointID string, epInfo *E return nil } -func (nm *networkManager) DeleteEndpointState(networkID string, epInfo *EndpointInfo) error { +func (nm *networkManager) DeleteEndpointStateless(networkID string, epInfo *EndpointInfo) error { // we want to always use hnsv2 in stateless // hnsv2 is only enabled if NetNs has a valid guid and the hnsv2 api is supported // by passing in a dummy guid, we satisfy the first condition @@ -761,16 +762,33 @@ func (nm *networkManager) SaveState(eps []*endpoint) error { return nm.save() } -func (nm *networkManager) DeleteState(_ []*EndpointInfo) error { +func (nm *networkManager) DeleteState(epInfos []*EndpointInfo) error { nm.Lock() defer nm.Unlock() logger.Info("Deleting state") - // We do not use DeleteEndpointState for stateless cni because we already call it in DeleteEndpoint - // This function is only for saving to stateless cni or the cni statefile - // For stateless cni, plugin.ipamInvoker.Delete takes care of removing the state in the main Delete function + // For AKS stateless cni, plugin.ipamInvoker.Delete takes care of removing the state in the main Delete function. + // For swiftv2 stateless cni, this call will delete the endpoint state from CNS. 
if nm.IsStatelessCNIMode() { + for _, epInfo := range epInfos { + // this cleanup happens only for standalone swiftv2 to delete endpoint state from CNS. + if epInfo.NICType == cns.NodeNetworkInterfaceFrontendNIC || epInfo.NICType == cns.NodeNetworkInterfaceAccelnetFrontendNIC { + // swiftv2 multitenancy does not call plugin.ipamInvoker.Delete and so state does not automatically clean up. this call is required to + // cleanup state in CNS + // One Delete call for endpointID will remove all interface info associated with that endpointID in CNS + response, err := nm.CnsClient.DeleteEndpointState(context.TODO(), epInfo.EndpointID) + if err != nil { + if response != nil && response.ReturnCode == types.NotFound { + logger.Info("Endpoint state not found in CNS", zap.String("endpointID", epInfo.EndpointID)) + return nil + } + return errors.Wrapf(err, "Delete endpoint API returned with error for endpoint %s", epInfo.EndpointID) + } + logger.Info("Delete endpoint succeeded", zap.String("endpointID", epInfo.EndpointID), zap.String("returnCode", response.ReturnCode.String())) + break + } + } return nil } @@ -784,12 +802,11 @@ func cnsEndpointInfotoCNIEpInfos(endpointInfo restserver.EndpointInfo, endpointI for ifName, ipInfo := range endpointInfo.IfnameToIPMap { epInfo := &EndpointInfo{ - EndpointID: endpointID, // endpoint id is always the same, but we shouldn't use it in the stateless path - IfIndex: EndpointIfIndex, // Azure CNI supports only one interface - ContainerID: endpointID, - PODName: endpointInfo.PodName, - PODNameSpace: endpointInfo.PodNamespace, - NetworkContainerID: endpointID, + EndpointID: endpointID, // endpoint id is always the same, but we shouldn't use it in the stateless path + IfIndex: EndpointIfIndex, // Azure CNI supports only one interface + ContainerID: endpointID, + PODName: endpointInfo.PodName, + PODNameSpace: endpointInfo.PodNamespace, } // If we create an endpoint state with stateful cni and then swap to a stateless cni binary, ifname would not 
be populated @@ -809,6 +826,8 @@ func cnsEndpointInfotoCNIEpInfos(endpointInfo restserver.EndpointInfo, endpointI epInfo.NICType = ipInfo.NICType epInfo.HNSNetworkID = ipInfo.HnsNetworkID epInfo.MacAddress = net.HardwareAddr(ipInfo.MacAddress) + epInfo.NetworkContainerID = ipInfo.NetworkContainerID + ret = append(ret, epInfo) } return ret @@ -837,11 +856,12 @@ func generateCNSIPInfoMap(eps []*endpoint) map[string]*restserver.IPInfo { for _, ep := range eps { ifNametoIPInfoMap[ep.IfName] = &restserver.IPInfo{ // in windows, the nicname is args ifname, in linux, it's ethX - NICType: ep.NICType, - HnsEndpointID: ep.HnsId, - HnsNetworkID: ep.HNSNetworkID, - HostVethName: ep.HostIfName, - MacAddress: ep.MacAddress.String(), + NICType: ep.NICType, + HnsEndpointID: ep.HnsId, + HnsNetworkID: ep.HNSNetworkID, + HostVethName: ep.HostIfName, + MacAddress: ep.MacAddress.String(), + NetworkContainerID: ep.NetworkContainerID, } } diff --git a/network/network.go b/network/network.go index 31537b6522..41e1bf4307 100644 --- a/network/network.go +++ b/network/network.go @@ -108,7 +108,7 @@ func (nm *networkManager) newExternalInterface(ifName, subnet, nicType string) e // Find the host interface. macAddress := net.HardwareAddr{} - if nicType != string(cns.BackendNIC) { + if nicType != string(cns.BackendNIC) && nicType != string(cns.ApipaNIC) { hostIf, err := net.InterfaceByName(ifName) if err != nil { return errors.Wrap(err, "failed to find host interface") @@ -337,7 +337,6 @@ func (nm *networkManager) EndpointCreate(cnsclient apipaClient, epInfos []*Endpo return err } } - ep, err := nm.createEndpoint(cnsclient, epInfo.NetworkID, epInfo) if err != nil { return err diff --git a/network/network_windows.go b/network/network_windows.go index 3107800eed..fbc8b3677a 100644 --- a/network/network_windows.go +++ b/network/network_windows.go @@ -348,7 +348,8 @@ func (nm *networkManager) addIPv6DefaultRoute() error { // newNetworkImplHnsV2 creates a new container network for HNSv2. 
func (nm *networkManager) newNetworkImplHnsV2(nwInfo *EndpointInfo, extIf *externalInterface) (*network, error) { // network creation is not required for IB - if nwInfo.NICType == cns.BackendNIC { + // For apipa nic, we create network as part of endpoint creation + if nwInfo.NICType == cns.BackendNIC || nwInfo.NICType == cns.ApipaNIC { return &network{Endpoints: make(map[string]*endpoint)}, nil } From 8bf932a458e1e2d2129c012b0c0f6101e1b84463 Mon Sep 17 00:00:00 2001 From: Vipul Singh Date: Sun, 16 Nov 2025 12:36:40 -0800 Subject: [PATCH 24/47] ci: Manifests files for cilium v1.18 (#4100) ci: Add manifest for cilium v1.18 Signed-off-by: Vipul Singh --- .pipelines/build/dockerfiles/cns.Dockerfile | 4 +- cni/Dockerfile | 4 +- cns/Dockerfile | 6 +- .../v1.18/cilium-agent/files/clusterrole.yaml | 121 + .../files/clusterrolebinding.yaml | 14 + .../cilium-agent/files/serviceaccount.yaml | 5 + .../templates/daemonset-dualstack.yaml | 436 + .../cilium-agent/templates/daemonset.yaml | 431 + .../cilium-config-dualstack.yaml | 147 + .../cilium-config/cilium-config-hubble.yaml | 149 + .../v1.18/cilium-config/cilium-config.yaml | 145 + .../cilium-operator/files/clusterrole.yaml | 282 + .../files/clusterrolebinding.yaml | 12 + .../cilium-operator/files/serviceaccount.yaml | 5 + .../cilium-operator/templates/deployment.yaml | 171 + .../common/allowed-iptables-patterns.yaml | 61 + .../cilium/v1.18/ebpf/common/ccnps.yaml | 19 + .../ebpf/common/cilium-agent-clusterrole.yaml | 125 + .../ciliumclusterwidenetworkpolicies.yaml | 7054 +++++++++++++++++ .../cilium/v1.18/ebpf/overlay/cilium.yaml | 538 ++ ...azure-ip-masq-agent-config-reconciled.yaml | 13 + .../ebpf/overlay/static/cilium-config.yaml | 173 + .../cilium/v1.18/ebpf/podsubnet/cilium.yaml | 508 ++ .../azure-dns-imds-ip-masq-agent-config.yaml | 26 + .../ebpf/podsubnet/static/cilium-config.yaml | 173 + .../cilium/v1.18/hubble/hubble-peer-svc.yaml | 18 + 26 files changed, 10633 insertions(+), 7 deletions(-) create mode 100644 
test/integration/manifests/cilium/v1.18/cilium-agent/files/clusterrole.yaml create mode 100644 test/integration/manifests/cilium/v1.18/cilium-agent/files/clusterrolebinding.yaml create mode 100644 test/integration/manifests/cilium/v1.18/cilium-agent/files/serviceaccount.yaml create mode 100644 test/integration/manifests/cilium/v1.18/cilium-agent/templates/daemonset-dualstack.yaml create mode 100644 test/integration/manifests/cilium/v1.18/cilium-agent/templates/daemonset.yaml create mode 100644 test/integration/manifests/cilium/v1.18/cilium-config/cilium-config-dualstack.yaml create mode 100644 test/integration/manifests/cilium/v1.18/cilium-config/cilium-config-hubble.yaml create mode 100644 test/integration/manifests/cilium/v1.18/cilium-config/cilium-config.yaml create mode 100644 test/integration/manifests/cilium/v1.18/cilium-operator/files/clusterrole.yaml create mode 100644 test/integration/manifests/cilium/v1.18/cilium-operator/files/clusterrolebinding.yaml create mode 100644 test/integration/manifests/cilium/v1.18/cilium-operator/files/serviceaccount.yaml create mode 100644 test/integration/manifests/cilium/v1.18/cilium-operator/templates/deployment.yaml create mode 100644 test/integration/manifests/cilium/v1.18/ebpf/common/allowed-iptables-patterns.yaml create mode 100644 test/integration/manifests/cilium/v1.18/ebpf/common/ccnps.yaml create mode 100644 test/integration/manifests/cilium/v1.18/ebpf/common/cilium-agent-clusterrole.yaml create mode 100644 test/integration/manifests/cilium/v1.18/ebpf/common/ciliumclusterwidenetworkpolicies.yaml create mode 100644 test/integration/manifests/cilium/v1.18/ebpf/overlay/cilium.yaml create mode 100644 test/integration/manifests/cilium/v1.18/ebpf/overlay/static/azure-ip-masq-agent-config-reconciled.yaml create mode 100644 test/integration/manifests/cilium/v1.18/ebpf/overlay/static/cilium-config.yaml create mode 100644 test/integration/manifests/cilium/v1.18/ebpf/podsubnet/cilium.yaml create mode 100644 
test/integration/manifests/cilium/v1.18/ebpf/podsubnet/static/azure-dns-imds-ip-masq-agent-config.yaml create mode 100644 test/integration/manifests/cilium/v1.18/ebpf/podsubnet/static/cilium-config.yaml create mode 100644 test/integration/manifests/cilium/v1.18/hubble/hubble-peer-svc.yaml diff --git a/.pipelines/build/dockerfiles/cns.Dockerfile b/.pipelines/build/dockerfiles/cns.Dockerfile index 1fc8f9d5b1..a58d38ac73 100644 --- a/.pipelines/build/dockerfiles/cns.Dockerfile +++ b/.pipelines/build/dockerfiles/cns.Dockerfile @@ -11,11 +11,11 @@ ENTRYPOINT ["azure-cns.exe"] EXPOSE 10090 # mcr.microsoft.com/azurelinux/base/core:3.0 -FROM --platform=linux/${ARCH} mcr.microsoft.com/azurelinux/base/core@sha256:833693619d523c23b1fe4d9c1f64a6c697e2a82f7a6ee26e1564897c3fe3fa02 AS build-helper +FROM --platform=linux/${ARCH} mcr.microsoft.com/azurelinux/base/core@sha256:3d53b96f4e336a197023bda703a056eaefecc6728e9a2b0c1ef42f7dce183338 AS build-helper RUN tdnf install -y iptables # mcr.microsoft.com/azurelinux/distroless/minimal:3.0 -FROM --platform=linux/${ARCH} mcr.microsoft.com/azurelinux/distroless/minimal@sha256:d784c8233e87e8bce2e902ff59a91262635e4cabc25ec55ac0a718344514db3c AS linux +FROM --platform=linux/${ARCH} mcr.microsoft.com/azurelinux/distroless/minimal@sha256:6b78aa535a2a5107ee308b767c0f1f5055a58d0e751f9d87543bc504da6d0ed3 AS linux ARG ARTIFACT_DIR . 
COPY --from=build-helper /usr/sbin/*tables* /usr/sbin/ diff --git a/cni/Dockerfile b/cni/Dockerfile index 5867fd09b2..df656227f0 100644 --- a/cni/Dockerfile +++ b/cni/Dockerfile @@ -6,10 +6,10 @@ ARG OS_VERSION ARG OS # mcr.microsoft.com/oss/go/microsoft/golang:1.24-azurelinux3.0 -FROM --platform=linux/${ARCH} mcr.microsoft.com/oss/go/microsoft/golang@sha256:7bbbda682ce4a462855bd8a61c5efdc1e79ab89d9e32c2610f41e6f9502e1cf4 AS go +FROM --platform=linux/${ARCH} mcr.microsoft.com/oss/go/microsoft/golang@sha256:6a563aa0a323171b94932ea98451d600ae3472758faac8cd28bbd20c83cd1ef9 AS go # mcr.microsoft.com/azurelinux/base/core:3.0 -FROM --platform=linux/${ARCH} mcr.microsoft.com/azurelinux/base/core@sha256:833693619d523c23b1fe4d9c1f64a6c697e2a82f7a6ee26e1564897c3fe3fa02 AS mariner-core +FROM --platform=linux/${ARCH} mcr.microsoft.com/azurelinux/base/core@sha256:3d53b96f4e336a197023bda703a056eaefecc6728e9a2b0c1ef42f7dce183338 AS mariner-core FROM go AS azure-vnet ARG OS diff --git a/cns/Dockerfile b/cns/Dockerfile index 36fd15919a..32df485d25 100644 --- a/cns/Dockerfile +++ b/cns/Dockerfile @@ -5,13 +5,13 @@ ARG OS_VERSION ARG OS # mcr.microsoft.com/oss/go/microsoft/golang:1.24-azurelinux3.0 -FROM --platform=linux/${ARCH} mcr.microsoft.com/oss/go/microsoft/golang@sha256:7bbbda682ce4a462855bd8a61c5efdc1e79ab89d9e32c2610f41e6f9502e1cf4 AS go +FROM --platform=linux/${ARCH} mcr.microsoft.com/oss/go/microsoft/golang@sha256:6a563aa0a323171b94932ea98451d600ae3472758faac8cd28bbd20c83cd1ef9 AS go # mcr.microsoft.com/azurelinux/base/core:3.0 -FROM mcr.microsoft.com/azurelinux/base/core@sha256:833693619d523c23b1fe4d9c1f64a6c697e2a82f7a6ee26e1564897c3fe3fa02 AS mariner-core +FROM mcr.microsoft.com/azurelinux/base/core@sha256:3d53b96f4e336a197023bda703a056eaefecc6728e9a2b0c1ef42f7dce183338 AS mariner-core # mcr.microsoft.com/azurelinux/distroless/minimal:3.0 -FROM mcr.microsoft.com/azurelinux/distroless/minimal@sha256:d784c8233e87e8bce2e902ff59a91262635e4cabc25ec55ac0a718344514db3c AS 
mariner-distroless +FROM mcr.microsoft.com/azurelinux/distroless/minimal@sha256:6b78aa535a2a5107ee308b767c0f1f5055a58d0e751f9d87543bc504da6d0ed3 AS mariner-distroless FROM --platform=linux/${ARCH} go AS builder ARG OS diff --git a/test/integration/manifests/cilium/v1.18/cilium-agent/files/clusterrole.yaml b/test/integration/manifests/cilium/v1.18/cilium-agent/files/clusterrole.yaml new file mode 100644 index 0000000000..b718138c9e --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/cilium-agent/files/clusterrole.yaml @@ -0,0 +1,121 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium + labels: + app.kubernetes.io/part-of: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - watch + # This is used when validating policies in preflight. This will need to stay + # until we figure out how to avoid "get" inside the preflight, and then + # should be removed ideally. 
+ - get +- apiGroups: + - cilium.io + resources: + - ciliumbgppeeringpolicies + - ciliumclusterwideenvoyconfigs + - ciliumclusterwidenetworkpolicies + - ciliumegressgatewaypolicies + - ciliumendpoints + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumnetworkpolicies + - ciliumnodes + - ciliumnodeconfigs + - ciliumloadbalancerippools + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools + - ciliumbgpnodeconfigs + - ciliumbgpadvertisements + - ciliumbgppeerconfigs + verbs: + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumidentities + - ciliumendpoints + - ciliumnodes + verbs: + - create +- apiGroups: + - cilium.io + # To synchronize garbage collection of such resources + resources: + - ciliumidentities + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints + verbs: + - delete + - get +- apiGroups: + - cilium.io + resources: + - ciliumnodes + - ciliumnodes/status + verbs: + - get + - update +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies/status + - ciliumendpoints/status + - ciliumendpoints + - ciliuml2announcementpolicies/status + - ciliumbgpnodeconfigs/status + verbs: + - patch +- apiGroups: + - "" + resourceNames: + - cilium-config + resources: + - configmaps + verbs: + - list + - watch diff --git a/test/integration/manifests/cilium/v1.18/cilium-agent/files/clusterrolebinding.yaml b/test/integration/manifests/cilium/v1.18/cilium-agent/files/clusterrolebinding.yaml new file mode 100644 index 0000000000..93a6e06cdc --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/cilium-agent/files/clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium + labels: + app.kubernetes.io/part-of: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium +subjects: +- kind: 
ServiceAccount + name: "cilium" + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.18/cilium-agent/files/serviceaccount.yaml b/test/integration/manifests/cilium/v1.18/cilium-agent/files/serviceaccount.yaml new file mode 100644 index 0000000000..f7097b1616 --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/cilium-agent/files/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "cilium" + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.18/cilium-agent/templates/daemonset-dualstack.yaml b/test/integration/manifests/cilium/v1.18/cilium-agent/templates/daemonset-dualstack.yaml new file mode 100644 index 0000000000..e025348dbc --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/cilium-agent/templates/daemonset-dualstack.yaml @@ -0,0 +1,436 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + k8s-app: cilium + app.kubernetes.io/part-of: cilium + name: cilium + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: cilium + template: + metadata: + annotations: + prometheus.io/port: "9962" + prometheus.io/scrape: "true" + labels: + k8s-app: cilium + kubernetes.azure.com/ebpf-dataplane: cilium + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.azure.com/cluster + operator: Exists + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: 
CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 10 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + name: cilium-agent + ports: + - containerPort: 9962 + hostPort: 9962 + name: prometheus + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + - BPF + - PERFMON + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + startupProbe: + failureThreshold: 105 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /host/proc/sys/net + name: host-proc-sys-net + - mountPath: /host/proc/sys/kernel + name: host-proc-sys-kernel + - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: 
/var/run/cilium/netns + name: cilium-netns + mountPropagation: HostToContainer + dnsPolicy: ClusterFirst + hostNetwork: true + initContainers: + - name: install-cni-binaries + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + args: + - "/install-plugin.sh" + command: + - sh + securityContext: + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + drop: + - ALL + volumeMounts: + - name: cni-path + mountPath: /host/opt/cni/bin + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-cgroup + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - 
SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - args: + - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf + command: + - sh + - -c + - -- + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-bpf-fs + resources: {} + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + name: bpf-maps + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: clean-cilium-state + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + mountPropagation: HostToContainer + name: cilium-cgroup + - mountPath: /var/run/cilium + name: cilium-run + - name: start-ipv6-hp-bpf + image: $IPV6_IMAGE_REGISTRY/ipv6-hp-bpf:$IPV6_HP_BPF_VERSION + imagePullPolicy: IfNotPresent + command: [/ipv6-hp-bpf] + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/log + name: ipv6-hp-bpf + - name: 
block-wireserver + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + command: + - sh + - -cx + - | + iptables -t mangle -C FORWARD -d 168.63.129.16 -p tcp --dport 80 -j DROP + status=$? + set -e + if [ $status -eq 0 ]; then + echo "Skip adding iptables as it already exists" + else + iptables -t mangle -I FORWARD -d 168.63.129.16 -p tcp --dport 80 -j DROP + fi + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + drop: + - ALL + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /var/log + type: DirectoryOrCreate + name: ipv6-hp-bpf + - hostPath: + path: /etc/systemd + type: DirectoryOrCreate + name: host-etc-systemd + - hostPath: + path: /lib/systemd + type: DirectoryOrCreate + name: host-lib-systemd + - hostPath: + path: /usr/lib + type: DirectoryOrCreate + name: host-usr-lib + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + - hostPath: + path: /lib/modules + name: lib-modules + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - name: clustermesh-secrets + secret: + defaultMode: 256 + optional: true + secretName: cilium-clustermesh + - configMap: + defaultMode: 
420 + name: cilium-config + name: cilium-config-path + - hostPath: + path: /proc/sys/net + type: Directory + name: host-proc-sys-net + - hostPath: + path: /proc/sys/kernel + type: Directory + name: host-proc-sys-kernel + - hostPath: + path: /var/run/netns + type: DirectoryOrCreate + name: cilium-netns + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 2 + type: RollingUpdate diff --git a/test/integration/manifests/cilium/v1.18/cilium-agent/templates/daemonset.yaml b/test/integration/manifests/cilium/v1.18/cilium-agent/templates/daemonset.yaml new file mode 100644 index 0000000000..f7afdd7c21 --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/cilium-agent/templates/daemonset.yaml @@ -0,0 +1,431 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + k8s-app: cilium + app.kubernetes.io/part-of: cilium + name: cilium + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: cilium + template: + metadata: + annotations: + prometheus.io/port: "9962" + prometheus.io/scrape: "true" + labels: + k8s-app: cilium + kubernetes.azure.com/ebpf-dataplane: cilium + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.azure.com/cluster + operator: Exists + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless:$CILIUM_VERSION_TAG 
+ imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 10 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + name: cilium-agent + ports: + - containerPort: 9962 + hostPort: 9962 + name: prometheus + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + - BPF + - PERFMON + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + startupProbe: + failureThreshold: 105 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /host/proc/sys/net + name: host-proc-sys-net + - mountPath: /host/proc/sys/kernel + name: host-proc-sys-kernel + - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /var/run/cilium/netns + name: cilium-netns + mountPropagation: HostToContainer + dnsPolicy: ClusterFirst + hostNetwork: true + initContainers: + - 
name: install-cni-binaries + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + args: + - "/install-plugin.sh" + command: + - sh + securityContext: + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + drop: + - ALL + volumeMounts: + - name: cni-path + mountPath: /host/opt/cni/bin + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-cgroup + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + 
terminationMessagePolicy: File + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - args: + - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf + command: + - sh + - -c + - -- + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-bpf-fs + resources: {} + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + name: bpf-maps + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: clean-cilium-state + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + mountPropagation: HostToContainer + name: cilium-cgroup + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/etc/systemd + name: host-etc-systemd + - mountPath: /host/lib/systemd + name: host-lib-systemd + readOnly: true + - mountPath: /host/usr/lib + name: host-usr-lib + readOnly: true + - name: block-wireserver + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + command: + - sh + - -cx + - | + iptables -t mangle -C FORWARD -d 
168.63.129.16 -p tcp --dport 80 -j DROP + status=$? + set -e + if [ $status -eq 0 ]; then + echo "Skip adding iptables as it already exists" + else + iptables -t mangle -I FORWARD -d 168.63.129.16 -p tcp --dport 80 -j DROP + fi + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + drop: + - ALL + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /etc/systemd + type: DirectoryOrCreate + name: host-etc-systemd + - hostPath: + path: /lib/systemd + type: DirectoryOrCreate + name: host-lib-systemd + - hostPath: + path: /usr/lib + type: DirectoryOrCreate + name: host-usr-lib + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + - hostPath: + path: /lib/modules + name: lib-modules + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - name: clustermesh-secrets + secret: + defaultMode: 256 + optional: true + secretName: cilium-clustermesh + - configMap: + defaultMode: 420 + name: cilium-config + name: cilium-config-path + - hostPath: + path: /proc/sys/net + type: Directory + name: host-proc-sys-net + - hostPath: + path: /proc/sys/kernel + type: Directory + name: host-proc-sys-kernel + - hostPath: + path: /var/run/netns + type: 
DirectoryOrCreate + name: cilium-netns + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 2 + type: RollingUpdate diff --git a/test/integration/manifests/cilium/v1.18/cilium-config/cilium-config-dualstack.yaml b/test/integration/manifests/cilium/v1.18/cilium-config/cilium-config-dualstack.yaml new file mode 100644 index 0000000000..ab05c977c4 --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/cilium-config/cilium-config-dualstack.yaml @@ -0,0 +1,147 @@ +apiVersion: v1 +data: + agent-not-ready-taint-key: node.cilium.io/agent-not-ready + arping-refresh-period: 30s + auto-direct-node-routes: "false" + bpf-filter-priority: "2" + bpf-lb-external-clusterip: "false" + bpf-lb-map-max: "65536" + bpf-lb-mode: snat + bpf-map-dynamic-size-ratio: "0.0025" + bpf-policy-map-max: "16384" + bpf-root: /sys/fs/bpf + cgroup-root: /run/cilium/cgroupv2 + cilium-endpoint-gc-interval: 5m0s + cluster-id: "0" + cluster-name: default + debug: "false" + disable-cnp-status-updates: "true" + disable-endpoint-crd: "false" + dnsproxy-enable-transparent-mode: "false" + enable-auto-protect-node-port-range: "true" + enable-bgp-control-plane: "false" + enable-bpf-clock-probe: "true" + enable-endpoint-health-checking: "false" + enable-endpoint-routes: "true" + enable-health-check-nodeport: "true" + enable-health-checking: "true" + enable-host-legacy-routing: "true" + enable-hubble: "false" + enable-ipv4: "true" + enable-ipv4-masquerade: "false" + enable-ipv6: "true" + enable-ipv6-masquerade: "false" + enable-k8s-terminating-endpoint: "true" + enable-l2-neigh-discovery: "true" + enable-l7-proxy: "false" + enable-local-node-route: "false" + enable-local-redirect-policy: "false" + enable-metrics: "true" + enable-policy: default + enable-remote-node-identity: "true" + enable-session-affinity: "true" + enable-svc-source-range-check: "true" + enable-vtep: "false" + enable-well-known-identities: "false" + enable-xt-socket-fallback: "true" + identity-allocation-mode: crd + 
install-iptables-rules: "true" + install-no-conntrack-iptables-rules: "false" + ipam: delegated-plugin + kube-proxy-replacement: "true" + kube-proxy-replacement-healthz-bind-address: "0.0.0.0:10256" + local-router-ipv4: 169.254.23.0 + local-router-ipv6: "fe80::" + metrics: +cilium_bpf_map_pressure + monitor-aggregation: medium + monitor-aggregation-flags: all + monitor-aggregation-interval: 5s + node-port-bind-protection: "true" + nodes-gc-interval: 5m0s + operator-api-serve-addr: 127.0.0.1:9234 + operator-prometheus-serve-addr: :9963 + preallocate-bpf-maps: "false" + procfs: /host/proc + prometheus-serve-addr: :9962 + remove-cilium-node-taints: "true" + set-cilium-is-up-condition: "true" + sidecar-istio-proxy-image: cilium/istio_proxy + synchronize-k8s-nodes: "true" + tofqdns-dns-reject-response-code: refused + tofqdns-enable-dns-compression: "true" + tofqdns-endpoint-max-ip-per-hostname: "1000" + tofqdns-idle-connection-grace-period: 0s + tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "0" + tofqdns-proxy-response-max-delay: 100ms + routing-mode: native + unmanaged-pod-watcher-interval: "15" + vtep-cidr: "" + vtep-endpoint: "" + vtep-mac: "" + vtep-mask: "" + enable-sctp: "false" + external-envoy-proxy: "false" + k8s-client-qps: "10" + k8s-client-burst: "20" + mesh-auth-enabled: "true" + mesh-auth-queue-size: "1024" + mesh-auth-rotated-identities-queue-size: "1024" + mesh-auth-gc-interval: "5m0s" + proxy-connect-timeout: "2" + proxy-max-requests-per-connection: "0" + proxy-max-connection-duration-seconds: "0" + set-cilium-node-taints: "true" +## new values added for 1.16 below + enable-ipv4-big-tcp: "false" + enable-ipv6-big-tcp: "false" + enable-masquerade-to-route-source: "false" + enable-health-check-loadbalancer-ip: "false" + bpf-lb-acceleration: "disabled" + enable-k8s-networkpolicy: "true" + cni-exclusive: "false" # Cilium takes ownership of /etc/cni/net.d, pods cannot be scheduled with any other cni if cilium is down + cni-log-file: 
"/var/run/cilium/cilium-cni.log" + ipam-cilium-node-update-rate: "15s" + egress-gateway-reconciliation-trigger-interval: "1s" + nat-map-stats-entries: "32" + nat-map-stats-interval: "30s" + bpf-events-drop-enabled: "true" # exposes drop events to cilium monitor/hubble + bpf-events-policy-verdict-enabled: "true" # exposes policy verdict events to cilium monitor/hubble + bpf-events-trace-enabled: "true" # exposes trace events to cilium monitor/hubble + enable-tcx: "false" # attach endpoint programs with tcx if supported by kernel + datapath-mode: "veth" + direct-routing-skip-unreachable: "false" + enable-runtime-device-detection: "false" + bpf-lb-sock: "false" + bpf-lb-sock-terminate-pod-connections: "false" + nodeport-addresses: "" + k8s-require-ipv4-pod-cidr: "false" + k8s-require-ipv6-pod-cidr: "false" + enable-node-selector-labels: "false" +## new values for 1.17 + ces-slice-mode: "fcfs" + enable-cilium-endpoint-slice: "true" + bpf-lb-source-range-all-types: "false" + bpf-algorithm-annotation: "false" + bpf-lb-mode-annotation: "false" + enable-experimental-lb: "false" + enable-endpoint-lockdown-on-policy-overflow: "false" + health-check-icmp-failure-threshold: "3" + enable-internal-traffic-policy: "true" + enable-lb-ipam: "true" + enable-non-default-deny-policies: "true" + enable-source-ip-verification: "true" +## new values for 1.18 + bpf-policy-stats-map-max: "65536" + identity-management-mode: "agent" + tofqdns-preallocate-identities: "true" + policy-default-local-cluster: "false" +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + name: cilium-config + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.18/cilium-config/cilium-config-hubble.yaml b/test/integration/manifests/cilium/v1.18/cilium-config/cilium-config-hubble.yaml new file mode 100644 index 0000000000..d618d7ac6c --- /dev/null +++ 
b/test/integration/manifests/cilium/v1.18/cilium-config/cilium-config-hubble.yaml @@ -0,0 +1,149 @@ +apiVersion: v1 +data: + agent-not-ready-taint-key: node.cilium.io/agent-not-ready + arping-refresh-period: 30s + auto-direct-node-routes: "false" + bpf-lb-external-clusterip: "false" + bpf-lb-map-max: "65536" + bpf-lb-mode: snat + bpf-map-dynamic-size-ratio: "0.0025" + bpf-policy-map-max: "16384" + bpf-root: /sys/fs/bpf + cgroup-root: /run/cilium/cgroupv2 + cilium-endpoint-gc-interval: 5m0s + cluster-id: "0" + cluster-name: default + debug: "false" + disable-cnp-status-updates: "true" + disable-endpoint-crd: "false" + enable-auto-protect-node-port-range: "true" + enable-bgp-control-plane: "false" + enable-bpf-clock-probe: "true" + enable-endpoint-health-checking: "false" + enable-endpoint-routes: "true" + enable-health-check-nodeport: "true" + enable-health-checking: "true" + enable-host-legacy-routing: "true" + enable-hubble: "true" + enable-ipv4: "true" + enable-ipv4-masquerade: "false" + enable-ipv6: "false" + enable-ipv6-masquerade: "false" + enable-k8s-terminating-endpoint: "true" + enable-l2-neigh-discovery: "true" + enable-l7-proxy: "false" + enable-local-node-route: "false" + enable-local-redirect-policy: "false" + enable-metrics: "true" + enable-policy: default + enable-remote-node-identity: "true" + enable-session-affinity: "true" + enable-svc-source-range-check: "true" + enable-vtep: "false" + enable-well-known-identities: "false" + enable-xt-socket-fallback: "true" + hubble-listen-address: "" + hubble-metrics: flow:sourceContext=pod;destinationContext=pod tcp:sourceContext=pod;destinationContext=pod + dns:query drop:sourceContext=pod;destinationContext=pod + hubble-metrics-server: :9965 + identity-allocation-mode: crd + install-iptables-rules: "true" + install-no-conntrack-iptables-rules: "false" + ipam: delegated-plugin + kube-proxy-replacement: "true" + kube-proxy-replacement-healthz-bind-address: "0.0.0.0:10256" + local-router-ipv4: 169.254.23.0 + 
metrics: +cilium_bpf_map_pressure + monitor-aggregation: medium + monitor-aggregation-flags: all + monitor-aggregation-interval: 5s + node-port-bind-protection: "true" + nodes-gc-interval: 5m0s + operator-api-serve-addr: 127.0.0.1:9234 + operator-prometheus-serve-addr: :9963 + preallocate-bpf-maps: "false" + procfs: /host/proc + prometheus-serve-addr: :9962 + remove-cilium-node-taints: "true" + set-cilium-is-up-condition: "true" + sidecar-istio-proxy-image: cilium/istio_proxy + synchronize-k8s-nodes: "true" + tofqdns-dns-reject-response-code: refused + tofqdns-enable-dns-compression: "true" + tofqdns-endpoint-max-ip-per-hostname: "1000" + tofqdns-idle-connection-grace-period: 0s + tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "0" + tofqdns-proxy-response-max-delay: 100ms + routing-mode: native + unmanaged-pod-watcher-interval: "15" + vtep-cidr: "" + vtep-endpoint: "" + vtep-mac: "" + vtep-mask: "" + # new default values from Cilium v1.14.4 + enable-sctp: "false" + external-envoy-proxy: "false" + k8s-client-qps: "10" + k8s-client-burst: "20" + mesh-auth-enabled: "true" + mesh-auth-queue-size: "1024" + mesh-auth-rotated-identities-queue-size: "1024" + mesh-auth-gc-interval: "5m0s" + proxy-connect-timeout: "2" + proxy-max-requests-per-connection: "0" + proxy-max-connection-duration-seconds: "0" + set-cilium-node-taints: "true" + ## new values added for 1.16 below + enable-ipv4-big-tcp: "false" + enable-ipv6-big-tcp: "false" + enable-masquerade-to-route-source: "false" + enable-health-check-loadbalancer-ip: "false" + bpf-lb-acceleration: "disabled" + enable-k8s-networkpolicy: "true" + cni-exclusive: "false" # Cilium takes ownership of /etc/cni/net.d, pods cannot be scheduled with any other cni if cilium is down + cni-log-file: "/var/run/cilium/cilium-cni.log" + ipam-cilium-node-update-rate: "15s" + egress-gateway-reconciliation-trigger-interval: "1s" + nat-map-stats-entries: "32" + nat-map-stats-interval: "30s" + bpf-events-drop-enabled: "true" # 
exposes drop events to cilium monitor/hubble + bpf-events-policy-verdict-enabled: "true" # exposes policy verdict events to cilium monitor/hubble + bpf-events-trace-enabled: "true" # exposes trace events to cilium monitor/hubble + enable-tcx: "false" # attach endpoint programs with tcx if supported by kernel + datapath-mode: "veth" + direct-routing-skip-unreachable: "false" + enable-runtime-device-detection: "false" + bpf-lb-sock: "false" + bpf-lb-sock-terminate-pod-connections: "false" + nodeport-addresses: "" + k8s-require-ipv4-pod-cidr: "false" + k8s-require-ipv6-pod-cidr: "false" + enable-node-selector-labels: "false" +## new values for 1.17 + ces-slice-mode: "fcfs" + enable-cilium-endpoint-slice: "true" + bpf-lb-source-range-all-types: "false" + bpf-algorithm-annotation: "false" + bpf-lb-mode-annotation: "false" + enable-experimental-lb: "false" + enable-endpoint-lockdown-on-policy-overflow: "false" + health-check-icmp-failure-threshold: "3" + enable-internal-traffic-policy: "true" + enable-lb-ipam: "false" + enable-non-default-deny-policies: "true" + enable-source-ip-verification: "false" + ## new values for 1.18 + bpf-policy-stats-map-max: "65536" + identity-management-mode: "agent" + tofqdns-preallocate-identities: "true" + policy-default-local-cluster: "false" +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + name: cilium-config + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.18/cilium-config/cilium-config.yaml b/test/integration/manifests/cilium/v1.18/cilium-config/cilium-config.yaml new file mode 100644 index 0000000000..0cd53bca70 --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/cilium-config/cilium-config.yaml @@ -0,0 +1,145 @@ +apiVersion: v1 #Not verified, placeholder +data: + agent-not-ready-taint-key: node.cilium.io/agent-not-ready + arping-refresh-period: 30s + auto-direct-node-routes: 
"false" + bpf-lb-external-clusterip: "false" + bpf-lb-map-max: "65536" + bpf-lb-mode: snat + bpf-map-dynamic-size-ratio: "0.0025" + bpf-policy-map-max: "16384" + bpf-root: /sys/fs/bpf + cgroup-root: /run/cilium/cgroupv2 + cilium-endpoint-gc-interval: 5m0s + cluster-id: "0" + cluster-name: default + debug: "false" + disable-cnp-status-updates: "true" + disable-endpoint-crd: "false" + enable-auto-protect-node-port-range: "true" + enable-bgp-control-plane: "false" + enable-bpf-clock-probe: "true" + enable-endpoint-health-checking: "false" + enable-endpoint-routes: "true" + enable-health-check-nodeport: "true" + enable-health-checking: "true" + enable-host-legacy-routing: "true" + enable-hubble: "false" + enable-ipv4: "true" + enable-ipv4-masquerade: "false" + enable-ipv6: "false" + enable-ipv6-masquerade: "false" + enable-k8s-terminating-endpoint: "true" + enable-l2-neigh-discovery: "true" + enable-l7-proxy: "false" + enable-local-node-route: "false" + enable-local-redirect-policy: "true" # set to true for lrp test + enable-metrics: "true" + enable-policy: default + enable-session-affinity: "true" + enable-svc-source-range-check: "true" + enable-vtep: "false" + enable-well-known-identities: "false" + enable-xt-socket-fallback: "true" + identity-allocation-mode: crd + install-iptables-rules: "true" + install-no-conntrack-iptables-rules: "false" + ipam: delegated-plugin + kube-proxy-replacement: "true" + kube-proxy-replacement-healthz-bind-address: "0.0.0.0:10256" + local-router-ipv4: 169.254.23.0 + metrics: +cilium_bpf_map_pressure + monitor-aggregation: medium + monitor-aggregation-flags: all + monitor-aggregation-interval: 5s + node-port-bind-protection: "true" + nodes-gc-interval: 5m0s + operator-api-serve-addr: 127.0.0.1:9234 + operator-prometheus-serve-addr: :9963 + preallocate-bpf-maps: "false" + procfs: /host/proc + prometheus-serve-addr: :9962 + remove-cilium-node-taints: "true" + set-cilium-is-up-condition: "true" + sidecar-istio-proxy-image: 
cilium/istio_proxy + synchronize-k8s-nodes: "true" + tofqdns-dns-reject-response-code: refused + tofqdns-enable-dns-compression: "true" + tofqdns-endpoint-max-ip-per-hostname: "1000" + tofqdns-idle-connection-grace-period: 0s + tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "0" + tofqdns-proxy-response-max-delay: 100ms + routing-mode: native + unmanaged-pod-watcher-interval: "15" + vtep-cidr: "" + vtep-endpoint: "" + vtep-mac: "" + vtep-mask: "" + enable-sctp: "false" + external-envoy-proxy: "false" + k8s-client-qps: "10" + k8s-client-burst: "20" + mesh-auth-enabled: "true" + mesh-auth-queue-size: "1024" + mesh-auth-rotated-identities-queue-size: "1024" + mesh-auth-gc-interval: "5m0s" + proxy-connect-timeout: "2" + proxy-max-requests-per-connection: "0" + proxy-max-connection-duration-seconds: "0" + set-cilium-node-taints: "true" +## new values added for 1.16 below + enable-ipv4-big-tcp: "false" + enable-ipv6-big-tcp: "false" + enable-masquerade-to-route-source: "false" + enable-health-check-loadbalancer-ip: "false" + bpf-lb-acceleration: "disabled" + enable-k8s-networkpolicy: "true" + cni-exclusive: "false" # Cilium takes ownership of /etc/cni/net.d, pods cannot be scheduled with any other cni if cilium is down + cni-log-file: "/var/run/cilium/cilium-cni.log" + ipam-cilium-node-update-rate: "15s" + egress-gateway-reconciliation-trigger-interval: "1s" + nat-map-stats-entries: "32" + nat-map-stats-interval: "30s" + bpf-events-drop-enabled: "true" # exposes drop events to cilium monitor/hubble + bpf-events-policy-verdict-enabled: "true" # exposes policy verdict events to cilium monitor/hubble + bpf-events-trace-enabled: "true" # exposes trace events to cilium monitor/hubble + enable-tcx: "false" # attach endpoint programs with tcx if supported by kernel + datapath-mode: "veth" + direct-routing-skip-unreachable: "false" + enable-runtime-device-detection: "false" + bpf-lb-sock: "false" + bpf-lb-sock-terminate-pod-connections: "false" + 
nodeport-addresses: "" + k8s-require-ipv4-pod-cidr: "false" + k8s-require-ipv6-pod-cidr: "false" + enable-node-selector-labels: "false" +## new values for 1.17 + ces-slice-mode: "fcfs" + enable-cilium-endpoint-slice: "true" + bpf-lb-source-range-all-types: "false" + bpf-algorithm-annotation: "false" + bpf-lb-mode-annotation: "false" + enable-experimental-lb: "false" + enable-endpoint-lockdown-on-policy-overflow: "false" + health-check-icmp-failure-threshold: "3" + enable-internal-traffic-policy: "true" + enable-lb-ipam: "false" + enable-non-default-deny-policies: "true" + enable-source-ip-verification: "false" +## new values for 1.18 + # bpf-policy-stats-map-max specifies the maximum number of entries in global + # policy stats map + bpf-policy-stats-map-max: "65536" + identity-management-mode: "agent" + tofqdns-preallocate-identities: "true" + policy-default-local-cluster: "false" +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + name: cilium-config + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.18/cilium-operator/files/clusterrole.yaml b/test/integration/manifests/cilium/v1.18/cilium-operator/files/clusterrole.yaml new file mode 100644 index 0000000000..8b0e97601b --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/cilium-operator/files/clusterrole.yaml @@ -0,0 +1,282 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium-operator + labels: + app.kubernetes.io/part-of: cilium +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + # to automatically delete [core|kube]dns pods so that are starting to being + # managed by Cilium + - delete +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - cilium-config + verbs: + # allow patching of the configmap to set annotations + - patch +- apiGroups: + - "" + resources: + - nodes + 
verbs: + - list + - watch +- apiGroups: + - "" + resources: + # To remove node taints + - nodes + # To set NetworkUnavailable false on startup + - nodes/status + verbs: + - patch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + # to perform LB IP allocation for BGP + - services/status + verbs: + - update + - patch +- apiGroups: + - "" + resources: + # to check apiserver connectivity + - namespaces + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + # to perform the translation of a CNP that contains `ToGroup` to its endpoints + - services + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumclusterwidenetworkpolicies + verbs: + # Create auto-generated CNPs and CCNPs from Policies that have 'toGroups' + - create + - update + - deletecollection + # To update the status of the CNPs and CCNPs + - patch + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies/status + verbs: + # Update the auto-generated CNPs and CCNPs status. 
+ - patch + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints + - ciliumidentities + verbs: + # To perform garbage collection of such resources + - delete + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumidentities + verbs: + # To synchronize garbage collection of such resources + - update +- apiGroups: + - cilium.io + resources: + - ciliumnodes + verbs: + - create + - update + - get + - list + - watch + # To perform CiliumNode garbage collector + - delete +- apiGroups: + - cilium.io + resources: + - ciliumnodes/status + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumbgppeerconfigs + - ciliumbgpadvertisements + - ciliumbgpnodeconfigs + verbs: + - create + - update + - get + - list + - watch + - delete + - patch +- apiGroups: + - cilium.io + resources: + - ciliumbgpclusterconfigs/status + - ciliumbgppeerconfigs/status + verbs: + - update +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - update + resourceNames: + - ciliumloadbalancerippools.cilium.io + - ciliumbgppeeringpolicies.cilium.io + - ciliumbgpclusterconfigs.cilium.io + - ciliumbgppeerconfigs.cilium.io + - ciliumbgpadvertisements.cilium.io + - ciliumbgpnodeconfigs.cilium.io + - ciliumbgpnodeconfigoverrides.cilium.io + - ciliumclusterwideenvoyconfigs.cilium.io + - ciliumclusterwidenetworkpolicies.cilium.io + - ciliumegressgatewaypolicies.cilium.io + - ciliumendpoints.cilium.io + - ciliumendpointslices.cilium.io + - ciliumenvoyconfigs.cilium.io + - ciliumidentities.cilium.io + - ciliumlocalredirectpolicies.cilium.io + - ciliumnetworkpolicies.cilium.io + - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io + - 
ciliumgatewayclassconfigs.cilium.io +- apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools + - ciliumpodippools + - ciliumbgppeeringpolicies + - ciliumbgpclusterconfigs + - ciliumbgpnodeconfigoverrides + - ciliumbgppeerconfigs + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create +- apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools/status + verbs: + - patch +# For cilium-operator running in HA mode. +# +# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election +# between multiple running instances. +# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less +# common and fewer objects in the cluster watch "all Leases". +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - acns-flowlog-config + verbs: + - update + - get + - list + - watch + - delete +- apiGroups: + - acn.azure.com + resources: + - retinanetworkflowlogs + - retinanetworkflowlogs/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + resourceNames: + - retinanetworkflowlogs.acn.azure.com + verbs: + - get + - list + - watch + - create + - update + - delete diff --git a/test/integration/manifests/cilium/v1.18/cilium-operator/files/clusterrolebinding.yaml b/test/integration/manifests/cilium/v1.18/cilium-operator/files/clusterrolebinding.yaml new file mode 100644 index 0000000000..eb164361d4 --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/cilium-operator/files/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium-operator +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: +- kind: ServiceAccount + name: "cilium-operator" + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.18/cilium-operator/files/serviceaccount.yaml b/test/integration/manifests/cilium/v1.18/cilium-operator/files/serviceaccount.yaml new file mode 100644 index 0000000000..be4bfc048a --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/cilium-operator/files/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "cilium-operator" + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.18/cilium-operator/templates/deployment.yaml b/test/integration/manifests/cilium/v1.18/cilium-operator/templates/deployment.yaml new file mode 100644 index 0000000000..9ae13e2278 --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/cilium-operator/templates/deployment.yaml @@ -0,0 +1,171 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cilium-operator + namespace: kube-system + labels: + io.cilium/app: operator + name: cilium-operator +spec: + replicas: 2 + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + prometheus.io/port: "9963" + prometheus.io/scrape: "true" + labels: + io.cilium/app: operator + name: cilium-operator + kubernetes.azure.com/ebpf-dataplane: cilium + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + containers: + - name: cilium-operator + image: $CILIUM_IMAGE_REGISTRY/cilium/operator-generic:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + command: + - cilium-operator-generic + args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) + - --identity-gc-interval=0m20s + - --identity-heartbeat-timeout=0m20s + - --enable-cilium-endpoint-slice=true + - --ces-slice-mode=fcfs + env: + - name: K8S_NODE_NAME 
+ valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + ports: + - name: prometheus + containerPort: 9963 + hostPort: 9963 + protocol: TCP + securityContext: + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + add: + # Use to set socket permission + - CHOWN + # Used to terminate envoy child process + - KILL + # Used since cilium modifies routing tables, etc... + - NET_ADMIN + # Used since cilium creates raw sockets, etc... + - NET_RAW + # Used since cilium monitor uses mmap + - IPC_LOCK + # Used in iptables. Consider removing once we are iptables-free + - SYS_MODULE + # We need it for now but might not need it for >= 5.11 specially + # for the 'SYS_RESOURCE'. + # In >= 5.8 there's already BPF and PERMON capabilities + - SYS_ADMIN + # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC + - SYS_RESOURCE + # Both PERFMON and BPF requires kernel 5.8, container runtime + # cri-o >= v1.22.0 or containerd >= v1.5.0. + # If available, SYS_ADMIN can be removed. 
+ #- PERFMON + #- BPF + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + drop: + - ALL + livenessProbe: + httpGet: + host: "127.0.0.1" + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: cilium-config-path + mountPath: /tmp/cilium/config-map + readOnly: true + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + hostNetwork: true + restartPolicy: Always + priorityClassName: system-cluster-critical + serviceAccount: "cilium-operator" + serviceAccountName: "cilium-operator" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.azure.com/cluster + operator: Exists + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: kubernetes.io/os + operator: In + values: + - linux + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.azure.com/mode + operator: In + values: + - system + # In HA mode, cilium-operator pods must not be scheduled on the same + # node as they will clash with each other. 
+ podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + io.cilium/app: operator + topologyKey: kubernetes.io/hostname + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + - operator: "Exists" + effect: NoExecute + - operator: "Exists" + effect: NoSchedule + volumes: + # To read the configuration from the config map + - name: cilium-config-path + configMap: + name: cilium-config diff --git a/test/integration/manifests/cilium/v1.18/ebpf/common/allowed-iptables-patterns.yaml b/test/integration/manifests/cilium/v1.18/ebpf/common/allowed-iptables-patterns.yaml new file mode 100644 index 0000000000..cd8b1ca589 --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/ebpf/common/allowed-iptables-patterns.yaml @@ -0,0 +1,61 @@ +apiVersion: v1 +data: + filter: | + ^.*--comment.*kubernetes load balancer firewall + ^.*--comment.*kubernetes health check service ports + ^.*--comment.*kubernetes externally visible service portals + ^.*--comment.*kubernetes forwarding rules + ^.*--comment.*kubernetes forwarding conntrack rule + ^.*--comment.*kubernetes service portals + ^.*--comment.*kubernetes externally-visible service portals + + -A INPUT -j KUBE-FIREWALL + -A FORWARD -d 168.63.129.16/32 -p tcp -m tcp --dport 32526 -j DROP + -A FORWARD -d 168.63.129.16/32 -p tcp -m tcp --dport 80 -j DROP + -A OUTPUT -j KUBE-FIREWALL + -A KUBE-FIREWALL ! -s 127.0.0.0/8 -d 127.0.0.0/8 -m comment --comment "block incoming localnet connections" -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP + -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP + -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN + global: | + ^-N .* + ^-P .* + ^.*--comment.*cilium: + ^.*--comment.*cilium-feeder: + ^.*--comment.*AKS managed: added by AgentBaker + mangle: | + -A FORWARD -d 168.63.129.16/32 -p tcp -m tcp --dport 80 -j DROP + -A FORWARD -d 168.63.129.16/32 -p tcp -m tcp --dport 32526 -j DROP + nat: | + ^.*--comment.*metrics-server + ^.*--comment.*kube-dns + ^.*--comment.*gatekeeper-webhook-service + ^.*--comment.*azure-policy-webhook-service + ^.*--comment.*kubernetes:https cluster IP + ^.*--comment.*kubernetes forwarding rules + ^.*--comment.*kubernetes service traffic requiring SNAT + ^.*--comment.*kubernetes postrouting rules + ^.*--set-xmark 0x4000 + ^.*--comment.*kubernetes service portals + ^.*--comment.*kubernetes service nodeports + ^.*--comment.*kubernetes:https + ^.*--comment.*ip-masq-agent + ^.*0x4000/0x4000 + -A POSTROUTING -j SWIFT + -A SWIFT -s + -A POSTROUTING -j SWIFT-POSTROUTING + -A SWIFT-POSTROUTING -s + raw: "" + security: | + -A OUTPUT -d 168.63.129.16/32 -p tcp -m tcp --dport 53 -j ACCEPT + -A OUTPUT -d 168.63.129.16/32 -p tcp -m owner --uid-owner 0 -j ACCEPT + -A OUTPUT -d 168.63.129.16/32 -p tcp -m conntrack --ctstate INVALID,NEW -j DROP +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/actually-managed-by: Eno + app.kubernetes.io/managed-by: Helm + name: allowed-iptables-patterns + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.18/ebpf/common/ccnps.yaml b/test/integration/manifests/cilium/v1.18/ebpf/common/ccnps.yaml new file mode 100644 index 0000000000..cc0b65e95b --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/ebpf/common/ccnps.yaml @@ -0,0 +1,19 @@ +apiVersion: "cilium.io/v2" +kind: CiliumClusterwideNetworkPolicy +metadata: + name: block-azure-destinations +spec: + description: "Block TCP access to Azure destinations from all pods" + endpointSelector: {} # Applies to all pods 
in all namespaces + enableDefaultDeny: + egress: false + ingress: false + egressDeny: + - toCIDR: + - 168.63.129.16/32 # Azure DNS + toPorts: + - ports: + - port: "80" + protocol: TCP + - port: "32526" + protocol: TCP diff --git a/test/integration/manifests/cilium/v1.18/ebpf/common/cilium-agent-clusterrole.yaml b/test/integration/manifests/cilium/v1.18/ebpf/common/cilium-agent-clusterrole.yaml new file mode 100644 index 0000000000..30a5fecb72 --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/ebpf/common/cilium-agent-clusterrole.yaml @@ -0,0 +1,125 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/actually-managed-by: Eno + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: cilium + name: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - watch + - get +- apiGroups: + - cilium.io + resources: + - ciliumbgppeeringpolicies + - ciliumclusterwideenvoyconfigs + - ciliumclusterwidenetworkpolicies + - ciliumegressgatewaypolicies + - ciliumendpoints + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumnetworkpolicies + - ciliumnodes + - ciliumnodeconfigs + - ciliumloadbalancerippools + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools + - ciliumbgpnodeconfigs + - ciliumbgpadvertisements + - ciliumbgppeerconfigs + verbs: + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumidentities + - ciliumendpoints + - 
ciliumnodes + verbs: + - create +- apiGroups: + - cilium.io + resources: + - ciliumidentities + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints + verbs: + - delete + - get +- apiGroups: + - cilium.io + resources: + - ciliumnodes + - ciliumnodes/status + verbs: + - get + - update +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies/status + - ciliumendpoints/status + - ciliumendpoints + - ciliuml2announcementpolicies/status + - ciliumbgpnodeconfigs/status + verbs: + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create +- apiGroups: + - cilium.io + resources: + - ciliumnodes + verbs: + - patch diff --git a/test/integration/manifests/cilium/v1.18/ebpf/common/ciliumclusterwidenetworkpolicies.yaml b/test/integration/manifests/cilium/v1.18/ebpf/common/ciliumclusterwidenetworkpolicies.yaml new file mode 100644 index 0000000000..1027fc52ed --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/ebpf/common/ciliumclusterwidenetworkpolicies.yaml @@ -0,0 +1,7054 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + eno.azure.io/disable-updates: "true" + name: ciliumclusterwidenetworkpolicies.cilium.io +spec: + group: cilium.io + names: + categories: + - cilium + - ciliumpolicy + kind: CiliumClusterwideNetworkPolicy + listKind: CiliumClusterwideNetworkPolicyList + plural: ciliumclusterwidenetworkpolicies + shortNames: + - ccnp + singular: ciliumclusterwidenetworkpolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Valid')].status + name: Valid + type: string + name: v2 + schema: + openAPIV3Schema: + description: |- + CiliumClusterwideNetworkPolicy is a Kubernetes third-party resource with an + modified version of CiliumNetworkPolicy which is cluster scoped rather than + namespace scoped. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + anyOf: + - properties: + ingress: {} + required: + - ingress + - properties: + ingressDeny: {} + required: + - ingressDeny + - properties: + egress: {} + required: + - egress + - properties: + egressDeny: {} + required: + - egressDeny + description: Spec is the desired Cilium specific rule specification. + oneOf: + - properties: + endpointSelector: {} + required: + - endpointSelector + - properties: + nodeSelector: {} + required: + - nodeSelector + properties: + description: + description: |- + Description is a free form string, it can be used by the creator of + the rule to store human readable explanation of the purpose of this + rule. Rules cannot be identified by comment. + type: string + egress: + description: |- + Egress is a list of EgressRule which are enforced at egress. + If omitted or empty, this rule does not apply at egress. + items: + description: |- + EgressRule contains all rule types which can be applied at egress, i.e. + network traffic that originates inside the endpoint and exits the endpoint + selected by the endpointSelector. + + - All members of this structure are optional. If omitted or empty, the + member will have no effect on the rule. 
+ + - If multiple members of the structure are specified, then all members + must match in order for the rule to take effect. The exception to this + rule is the ToRequires member; the effects of any Requires field in any + rule will apply to all other rules as well. + + - ToEndpoints, ToCIDR, ToCIDRSet, ToEntities, ToServices and ToGroups are + mutually exclusive. Only one of these members may be present within an + individual rule. + properties: + authentication: + description: Authentication is the required authentication type + for the allowed traffic, if any. + properties: + mode: + description: Mode is the required authentication mode for + the allowed traffic, if any. + enum: + - disabled + - required + - test-always-fail + type: string + required: + - mode + type: object + icmps: + description: |- + ICMPs is a list of ICMP rule identified by type number + which the endpoint subject to the rule is allowed to connect to. + + Example: + Any endpoint with the label "app=httpd" is allowed to initiate + type 8 ICMP connections. + items: + description: ICMPRule is a list of ICMP fields. + properties: + fields: + description: Fields is a list of ICMP fields. + items: + description: ICMPField is a ICMP field. + properties: + family: + default: IPv4 + description: |- + Family is a IP address version. + Currently, we support `IPv4` and `IPv6`. + `IPv4` is set as default. 
+ enum: + - IPv4 + - IPv6 + type: string + type: + anyOf: + - type: integer + - type: string + description: "Type is a ICMP-type.\nIt should be + an 8bit code (0-255), or it's CamelCase name (for + example, \"EchoReply\").\nAllowed ICMP types are:\n + \ Ipv4: EchoReply | DestinationUnreachable | + Redirect | Echo | EchoRequest |\n\t\t RouterAdvertisement + | RouterSelection | TimeExceeded | ParameterProblem + |\n\t\t\t Timestamp | TimestampReply | Photuris + | ExtendedEcho Request | ExtendedEcho Reply\n + \ Ipv6: DestinationUnreachable | PacketTooBig + | TimeExceeded | ParameterProblem |\n\t\t\t EchoRequest + | EchoReply | MulticastListenerQuery| MulticastListenerReport + |\n\t\t\t MulticastListenerDone | RouterSolicitation + | RouterAdvertisement | NeighborSolicitation |\n\t\t\t + NeighborAdvertisement | RedirectMessage | RouterRenumbering + | ICMPNodeInformationQuery |\n\t\t\t ICMPNodeInformationResponse + | InverseNeighborDiscoverySolicitation | InverseNeighborDiscoveryAdvertisement + |\n\t\t\t HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply + | MobilePrefixSolicitation |\n\t\t\t MobilePrefixAdvertisement + | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix + |\n\t\t\t ExtendedEchoRequest | ExtendedEchoReply" + pattern: ^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho + 
Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$ + x-kubernetes-int-or-string: true + required: + - type + type: object + maxItems: 40 + type: array + type: object + type: array + toCIDR: + description: |- + ToCIDR is a list of IP blocks which the endpoint subject to the rule + is allowed to initiate connections. Only connections destined for + outside of the cluster and not targeting the host will be subject + to CIDR rules. This will match on the destination IP address of + outgoing connections. Adding a prefix into ToCIDR or into ToCIDRSet + with no ExcludeCIDRs is equivalent. Overlaps are allowed between + ToCIDR and ToCIDRSet. + + Example: + Any endpoint with the label "app=database-proxy" is allowed to + initiate connections to 10.2.3.0/24 + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + toCIDRSet: + description: |- + ToCIDRSet is a list of IP blocks which the endpoint subject to the rule + is allowed to initiate connections to in addition to connections + which are allowed via ToEndpoints, along with a list of subnets contained + within their corresponding IP block to which traffic should not be + allowed. This will match on the destination IP address of outgoing + connections. Adding a prefix into ToCIDR or into ToCIDRSet with no + ExcludeCIDRs is equivalent. Overlaps are allowed between ToCIDR and + ToCIDRSet. 
+ + Example: + Any endpoint with the label "app=database-proxy" is allowed to + initiate connections to 10.2.3.0/24 except from IPs in subnet 10.2.3.0/28. + items: + description: |- + CIDRRule is a rule that specifies a CIDR prefix to/from which outside + communication is allowed, along with an optional list of subnets within that + CIDR prefix to/from which outside communication is not allowed. + oneOf: + - properties: + cidr: {} + required: + - cidr + - properties: + cidrGroupRef: {} + required: + - cidrGroupRef + - properties: + cidrGroupSelector: {} + required: + - cidrGroupSelector + properties: + cidr: + description: CIDR is a CIDR prefix / IP Block. + format: cidr + type: string + cidrGroupRef: + description: |- + CIDRGroupRef is a reference to a CiliumCIDRGroup object. + A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to + the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + connections from. + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + cidrGroupSelector: + description: |- + CIDRGroupSelector selects CiliumCIDRGroups by their labels, + rather than by name. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + except: + description: |- + ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule + is not allowed to initiate connections to. These CIDR prefixes should be + contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not + supported yet. + These exceptions are only applied to the Cidr in this CIDRRule, and do not + apply to any other CIDR prefixes in any other CIDRRules. + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + type: object + type: array + toEndpoints: + description: |- + ToEndpoints is a list of endpoints identified by an EndpointSelector to + which the endpoints subject to the rule are allowed to communicate. + + Example: + Any endpoint with the label "role=frontend" can communicate with any + endpoint carrying the label "role=backend". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toEntities: + description: |- + ToEntities is a list of special entities to which the endpoint subject + to the rule is allowed to initiate connections. Supported entities are + `world`, `cluster`,`host`,`remote-node`,`kube-apiserver`, `init`, + `health`,`unmanaged` and `all`. + items: + description: |- + Entity specifies the class of receiver/sender endpoints that do not have + individual identities. Entities are used to describe "outside of cluster", + "host", etc. 
+ enum: + - all + - world + - cluster + - host + - init + - ingress + - unmanaged + - remote-node + - health + - none + - kube-apiserver + type: string + type: array + toFQDNs: + description: |- + ToFQDN allows whitelisting DNS names in place of IPs. The IPs that result + from DNS resolution of `ToFQDN.MatchName`s are added to the same + EgressRule object as ToCIDRSet entries, and behave accordingly. Any L4 and + L7 rules within this EgressRule will also apply to these IPs. + The DNS -> IP mapping is re-resolved periodically from within the + cilium-agent, and the IPs in the DNS response are effected in the policy + for selected pods as-is (i.e. the list of IPs is not modified in any way). + Note: An explicit rule to allow for DNS traffic is needed for the pods, as + ToFQDN counts as an egress rule and will enforce egress policy when + PolicyEnforcment=default. + Note: If the resolved IPs are IPs within the kubernetes cluster, the + ToFQDN rule will not apply to that IP. + Note: ToFQDN cannot occur in the same policy as other To* rules. + items: + oneOf: + - properties: + matchName: {} + required: + - matchName + - properties: + matchPattern: {} + required: + - matchPattern + properties: + matchName: + description: |- + MatchName matches literal DNS names. A trailing "." is automatically added + when missing. + maxLength: 255 + pattern: ^([-a-zA-Z0-9_]+[.]?)+$ + type: string + matchPattern: + description: |- + MatchPattern allows using wildcards to match DNS names. All wildcards are + case insensitive. The wildcards are: + - "*" matches 0 or more DNS valid characters, and may occur anywhere in + the pattern. As a special case a "*" as the leftmost character, without a + following "." matches all subdomains as well as the name to the right. + A trailing "." is automatically added when missing. 
+ + Examples: + `*.cilium.io` matches subomains of cilium at that level + www.cilium.io and blog.cilium.io match, cilium.io and google.com do not + `*cilium.io` matches cilium.io and all subdomains ends with "cilium.io" + except those containing "." separator, subcilium.io and sub-cilium.io match, + www.cilium.io and blog.cilium.io does not + sub*.cilium.io matches subdomains of cilium where the subdomain component + begins with "sub" + sub.cilium.io and subdomain.cilium.io match, www.cilium.io, + blog.cilium.io, cilium.io and google.com do not + maxLength: 255 + pattern: ^([-a-zA-Z0-9_*]+[.]?)+$ + type: string + type: object + type: array + toGroups: + description: |- + ToGroups is a directive that allows the integration with multiple outside + providers. Currently, only AWS is supported, and the rule can select by + multiple sub directives: + + Example: + toGroups: + - aws: + securityGroupsIds: + - 'sg-XXXXXXXXXXXXX' + items: + description: |- + Groups structure to store all kinds of new integrations that needs a new + derivative policy. + properties: + aws: + description: AWSGroup is an structure that can be used + to whitelisting information from AWS integration + properties: + labels: + additionalProperties: + type: string + type: object + region: + type: string + securityGroupsIds: + items: + type: string + type: array + securityGroupsNames: + items: + type: string + type: array + type: object + type: object + type: array + toNodes: + description: |- + ToNodes is a list of nodes identified by an + EndpointSelector to which endpoints subject to the rule is allowed to communicate. + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toPorts: + description: |- + ToPorts is a list of destination ports identified by port number and + protocol which the endpoint subject to the rule is allowed to + connect to. + + Example: + Any endpoint with the label "role=frontend" is allowed to initiate + connections to destination port 8080/tcp + items: + description: |- + PortRule is a list of ports/protocol combinations with optional Layer 7 + rules which must be met. + properties: + listener: + description: |- + listener specifies the name of a custom Envoy listener to which this traffic should be + redirected to. 
+ properties: + envoyConfig: + description: |- + EnvoyConfig is a reference to the CEC or CCEC resource in which + the listener is defined. + properties: + kind: + description: |- + Kind is the resource type being referred to. Defaults to CiliumEnvoyConfig or + CiliumClusterwideEnvoyConfig for CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy, + respectively. The only case this is currently explicitly needed is when referring to a + CiliumClusterwideEnvoyConfig from CiliumNetworkPolicy, as using a namespaced listener + from a cluster scoped policy is not allowed. + enum: + - CiliumEnvoyConfig + - CiliumClusterwideEnvoyConfig + type: string + name: + description: |- + Name is the resource name of the CiliumEnvoyConfig or CiliumClusterwideEnvoyConfig where + the listener is defined in. + minLength: 1 + type: string + required: + - name + type: object + name: + description: Name is the name of the listener. + minLength: 1 + type: string + priority: + description: |- + Priority for this Listener that is used when multiple rules would apply different + listeners to a policy map entry. Behavior of this is implementation dependent. + maximum: 100 + minimum: 1 + type: integer + required: + - envoyConfig + - name + type: object + originatingTLS: + description: |- + OriginatingTLS is the TLS context for the connections originated by + the L7 proxy. For egress policy this specifies the client-side TLS + parameters for the upstream connection originating from the L7 proxy + to the remote destination. For ingress policy this specifies the + client-side TLS parameters for the connection from the L7 proxy to + the local endpoint. + properties: + certificate: + description: |- + Certificate is the file name or k8s secret item name for the certificate + chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the + item must exist. 
+ type: string + privateKey: + description: |- + PrivateKey is the file name or k8s secret item name for the private key + matching the certificate chain. If omitted, 'tls.key' is assumed, if it + exists. If given, the item must exist. + type: string + secret: + description: |- + Secret is the secret that contains the certificates and private key for + the TLS context. + By default, Cilium will search in this secret for the following items: + - 'ca.crt' - Which represents the trusted CA to verify remote source. + - 'tls.crt' - Which represents the public key certificate. + - 'tls.key' - Which represents the private key matching the public key + certificate. + properties: + name: + description: Name is the name of the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + trustedCA: + description: |- + TrustedCA is the file name or k8s secret item name for the trusted CA. + If omitted, 'ca.crt' is assumed, if it exists. If given, the item must + exist. + type: string + required: + - secret + type: object + ports: + description: Ports is a list of L4 port/protocol + items: + description: PortProtocol specifies an L4 port with + an optional transport protocol + properties: + endPort: + description: EndPort can only be an L4 port number. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + description: |- + Port can be an L4 port number, or a name in the form of "http" + or "http-8080". + pattern: ^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$ + type: string + protocol: + description: |- + Protocol is the L4 protocol. If omitted or empty, any protocol + matches. Accepted values: "TCP", "UDP", "SCTP", "ANY" + + Matching on ICMP is not supported. 
+ + Named port specified for a container may narrow this down, but may not + contradict this. + enum: + - TCP + - UDP + - SCTP + - ANY + type: string + required: + - port + type: object + maxItems: 40 + type: array + rules: + description: |- + Rules is a list of additional port level rules which must be met in + order for the PortRule to allow the traffic. If omitted or empty, + no layer 7 rules are enforced. + oneOf: + - properties: + http: {} + required: + - http + - properties: + kafka: {} + required: + - kafka + - properties: + dns: {} + required: + - dns + - properties: + l7proto: {} + required: + - l7proto + properties: + dns: + description: DNS-specific rules. + items: + description: PortRuleDNS is a list of allowed DNS + lookups. + oneOf: + - properties: + matchName: {} + required: + - matchName + - properties: + matchPattern: {} + required: + - matchPattern + properties: + matchName: + description: |- + MatchName matches literal DNS names. A trailing "." is automatically added + when missing. + maxLength: 255 + pattern: ^([-a-zA-Z0-9_]+[.]?)+$ + type: string + matchPattern: + description: |- + MatchPattern allows using wildcards to match DNS names. All wildcards are + case insensitive. The wildcards are: + - "*" matches 0 or more DNS valid characters, and may occur anywhere in + the pattern. As a special case a "*" as the leftmost character, without a + following "." matches all subdomains as well as the name to the right. + A trailing "." is automatically added when missing. + + Examples: + `*.cilium.io` matches subdomains of cilium at that level + www.cilium.io and blog.cilium.io match, cilium.io and google.com do not + `*cilium.io` matches cilium.io and all subdomains ending with "cilium.io" + except those containing "."
separator, subcilium.io and sub-cilium.io match, + www.cilium.io and blog.cilium.io does not + sub*.cilium.io matches subdomains of cilium where the subdomain component + begins with "sub" + sub.cilium.io and subdomain.cilium.io match, www.cilium.io, + blog.cilium.io, cilium.io and google.com do not + maxLength: 255 + pattern: ^([-a-zA-Z0-9_*]+[.]?)+$ + type: string + type: object + type: array + http: + description: HTTP specific rules. + items: + description: |- + PortRuleHTTP is a list of HTTP protocol constraints. All fields are + optional, if all fields are empty or missing, the rule does not have any + effect. + + All fields of this type are extended POSIX regex as defined by IEEE Std + 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) + matched against the path of an incoming request. Currently it can contain + characters disallowed from the conventional "path" part of a URL as defined + by RFC 3986. + properties: + headerMatches: + description: |- + HeaderMatches is a list of HTTP headers which must be + present and match against the given values. Mismatch field can be used + to specify what to do when there is no match. + items: + description: |- + HeaderMatch extends the HeaderValue for matching requirement of a + named header field against an immediate string, a secret value, or + a regex. If none of the optional fields is present, then the + header value is not matched, only presence of the header is enough. + properties: + mismatch: + description: |- + Mismatch identifies what to do in case there is no match. The default is + to drop the request. Otherwise the overall rule is still considered as + matching, but the mismatches are logged in the access log. + enum: + - LOG + - ADD + - DELETE + - REPLACE + type: string + name: + description: Name identifies the header. + minLength: 1 + type: string + secret: + description: |- + Secret refers to a secret that contains the value to be matched against. 
+ The secret must only contain one entry. If the referred secret does not + exist, and there is no "Value" specified, the match will fail. + properties: + name: + description: Name is the name of the + secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + value: + description: |- + Value matches the exact value of the header. Can be specified either + alone or together with "Secret"; will be used as the header value if the + secret can not be found in the latter case. + type: string + required: + - name + type: object + type: array + headers: + description: |- + Headers is a list of HTTP headers which must be present in the + request. If omitted or empty, requests are allowed regardless of + headers present. + items: + type: string + type: array + host: + description: |- + Host is an extended POSIX regex matched against the host header of a + request. Examples: + + - foo.bar.com will match the host fooXbar.com or foo-bar.com + - foo\.bar\.com will only match the host foo.bar.com + + If omitted or empty, the value of the host header is ignored. + format: idn-hostname + type: string + method: + description: |- + Method is an extended POSIX regex matched against the method of a + request, e.g. "GET", "POST", "PUT", "PATCH", "DELETE", ... + + If omitted or empty, all methods are allowed. + type: string + path: + description: |- + Path is an extended POSIX regex matched against the path of a + request. Currently it can contain characters disallowed from the + conventional "path" part of a URL as defined by RFC 3986. + + If omitted or empty, all paths are allowed. + type: string + type: object + type: array + kafka: + description: Kafka-specific rules. + items: + description: |- + PortRule is a list of Kafka protocol constraints.
All fields are + optional, if all fields are empty or missing, the rule will match all + Kafka messages. + properties: + apiKey: + description: |- + APIKey is a case-insensitive string matched against the key of a + request, e.g. "produce", "fetch", "createtopic", "deletetopic", et al + Reference: https://kafka.apache.org/protocol#protocol_api_keys + + If omitted or empty, and if Role is not specified, then all keys are allowed. + type: string + apiVersion: + description: |- + APIVersion is the version matched against the api version of the + Kafka message. If set, it has to be a string representing a positive + integer. + + If omitted or empty, all versions are allowed. + type: string + clientID: + description: |- + ClientID is the client identifier as provided in the request. + + From Kafka protocol documentation: + This is a user supplied identifier for the client application. The + user can use any identifier they like and it will be used when + logging errors, monitoring aggregates, etc. For example, one might + want to monitor not just the requests per second overall, but the + number coming from each client application (each of which could + reside on multiple servers). This id acts as a logical grouping + across all requests from a particular client. + + If omitted or empty, all client identifiers are allowed. + type: string + role: + description: |- + Role is a case-insensitive string and describes a group of API keys + necessary to perform certain higher-level Kafka operations such as "produce" + or "consume". A Role automatically expands into all APIKeys required + to perform the specified higher-level operation. + + The following values are supported: + - "produce": Allow producing to the topics specified in the rule + - "consume": Allow consuming from the topics specified in the rule + + This field is incompatible with the APIKey field, i.e APIKey and Role + cannot both be specified in the same rule. 
+ + If omitted or empty, and if APIKey is not specified, then all keys are + allowed. + enum: + - produce + - consume + type: string + topic: + description: |- + Topic is the topic name contained in the message. If a Kafka request + contains multiple topics, then all topics must be allowed or the + message will be rejected. + + This constraint is ignored if the matched request message type + doesn't contain any topic. Maximum size of Topic can be 249 + characters as per recent Kafka spec and allowed characters are + a-z, A-Z, 0-9, -, . and _. + + Older Kafka versions had longer topic lengths of 255, but in Kafka 0.10 + version the length was changed from 255 to 249. For compatibility + reasons we are using 255. + + If omitted or empty, all topics are allowed. + maxLength: 255 + type: string + type: object + type: array + l7: + description: Key-value pair rules. + items: + additionalProperties: + type: string + description: |- + PortRuleL7 is a list of key-value pairs interpreted by a L7 protocol as + protocol constraints. All fields are optional, if all fields are empty or + missing, the rule does not have any effect. + type: object + type: array + l7proto: + description: Name of the L7 protocol for which the + Key-value pair rules apply. + type: string + type: object + serverNames: + description: |- + ServerNames is a list of allowed TLS SNI values. If not empty, then + TLS must be present and one of the provided SNIs must be indicated in the + TLS handshake. + items: + type: string + type: array + terminatingTLS: + description: |- + TerminatingTLS is the TLS context for the connection terminated by + the L7 proxy. For egress policy this specifies the server-side TLS + parameters to be applied on the connections originated from the local + endpoint and terminated by the L7 proxy. For ingress policy this specifies + the server-side TLS parameters to be applied on the connections + originated from a remote source and terminated by the L7 proxy. 
+ properties: + certificate: + description: |- + Certificate is the file name or k8s secret item name for the certificate + chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the + item must exist. + type: string + privateKey: + description: |- + PrivateKey is the file name or k8s secret item name for the private key + matching the certificate chain. If omitted, 'tls.key' is assumed, if it + exists. If given, the item must exist. + type: string + secret: + description: |- + Secret is the secret that contains the certificates and private key for + the TLS context. + By default, Cilium will search in this secret for the following items: + - 'ca.crt' - Which represents the trusted CA to verify remote source. + - 'tls.crt' - Which represents the public key certificate. + - 'tls.key' - Which represents the private key matching the public key + certificate. + properties: + name: + description: Name is the name of the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + trustedCA: + description: |- + TrustedCA is the file name or k8s secret item name for the trusted CA. + If omitted, 'ca.crt' is assumed, if it exists. If given, the item must + exist. + type: string + required: + - secret + type: object + type: object + type: array + toRequires: + description: |- + ToRequires is a list of additional constraints which must be met + in order for the selected endpoints to be able to connect to other + endpoints. These additional constraints do not by themselves grant access + privileges and must always be accompanied with at least one matching + ToEndpoints. + + Example: + Any Endpoint with the label "team=A" requires any endpoint to which it + communicates to also carry the label "team=A". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector.
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toServices: + description: |- + ToServices is a list of services to which the endpoint subject + to the rule is allowed to initiate connections. + Currently Cilium only supports toServices for K8s services. + items: + description: |- + Service selects policy targets that are bundled as part of a + logical load-balanced service. 
+ + Currently only Kubernetes-based Services are supported. + properties: + k8sService: + description: K8sService selects service by name and namespace + pair + properties: + namespace: + type: string + serviceName: + type: string + type: object + k8sServiceSelector: + description: K8sServiceSelector selects services by k8s + labels and namespace + properties: + namespace: + type: string + selector: + description: ServiceSelector is a label selector for + k8s services + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the + value from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - selector + type: object + type: object + type: array + type: object + type: array + egressDeny: + description: |- + EgressDeny is a list of EgressDenyRule which are enforced at egress. + Any rule inserted here will be denied regardless of the allowed egress + rules in the 'egress' field. + If omitted or empty, this rule does not apply at egress. + items: + description: |- + EgressDenyRule contains all rule types which can be applied at egress, i.e. + network traffic that originates inside the endpoint and exits the endpoint + selected by the endpointSelector. + + - All members of this structure are optional. If omitted or empty, the + member will have no effect on the rule. + + - If multiple members of the structure are specified, then all members + must match in order for the rule to take effect. The exception to this + rule is the ToRequires member; the effects of any Requires field in any + rule will apply to all other rules as well. + + - ToEndpoints, ToCIDR, ToCIDRSet, ToEntities, ToServices and ToGroups are + mutually exclusive. Only one of these members may be present within an + individual rule. + properties: + icmps: + description: |- + ICMPs is a list of ICMP rule identified by type number + which the endpoint subject to the rule is not allowed to connect to. + + Example: + Any endpoint with the label "app=httpd" is not allowed to initiate + type 8 ICMP connections. + items: + description: ICMPRule is a list of ICMP fields. + properties: + fields: + description: Fields is a list of ICMP fields. + items: + description: ICMPField is a ICMP field. + properties: + family: + default: IPv4 + description: |- + Family is a IP address version. + Currently, we support `IPv4` and `IPv6`. + `IPv4` is set as default. 
+ enum: + - IPv4 + - IPv6 + type: string + type: + anyOf: + - type: integer + - type: string + description: "Type is a ICMP-type.\nIt should be + an 8bit code (0-255), or it's CamelCase name (for + example, \"EchoReply\").\nAllowed ICMP types are:\n + \ Ipv4: EchoReply | DestinationUnreachable | + Redirect | Echo | EchoRequest |\n\t\t RouterAdvertisement + | RouterSelection | TimeExceeded | ParameterProblem + |\n\t\t\t Timestamp | TimestampReply | Photuris + | ExtendedEcho Request | ExtendedEcho Reply\n + \ Ipv6: DestinationUnreachable | PacketTooBig + | TimeExceeded | ParameterProblem |\n\t\t\t EchoRequest + | EchoReply | MulticastListenerQuery| MulticastListenerReport + |\n\t\t\t MulticastListenerDone | RouterSolicitation + | RouterAdvertisement | NeighborSolicitation |\n\t\t\t + NeighborAdvertisement | RedirectMessage | RouterRenumbering + | ICMPNodeInformationQuery |\n\t\t\t ICMPNodeInformationResponse + | InverseNeighborDiscoverySolicitation | InverseNeighborDiscoveryAdvertisement + |\n\t\t\t HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply + | MobilePrefixSolicitation |\n\t\t\t MobilePrefixAdvertisement + | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix + |\n\t\t\t ExtendedEchoRequest | ExtendedEchoReply" + pattern: ^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho + 
Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$ + x-kubernetes-int-or-string: true + required: + - type + type: object + maxItems: 40 + type: array + type: object + type: array + toCIDR: + description: |- + ToCIDR is a list of IP blocks which the endpoint subject to the rule + is allowed to initiate connections. Only connections destined for + outside of the cluster and not targeting the host will be subject + to CIDR rules. This will match on the destination IP address of + outgoing connections. Adding a prefix into ToCIDR or into ToCIDRSet + with no ExcludeCIDRs is equivalent. Overlaps are allowed between + ToCIDR and ToCIDRSet. + + Example: + Any endpoint with the label "app=database-proxy" is allowed to + initiate connections to 10.2.3.0/24 + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + toCIDRSet: + description: |- + ToCIDRSet is a list of IP blocks which the endpoint subject to the rule + is allowed to initiate connections to in addition to connections + which are allowed via ToEndpoints, along with a list of subnets contained + within their corresponding IP block to which traffic should not be + allowed. This will match on the destination IP address of outgoing + connections. Adding a prefix into ToCIDR or into ToCIDRSet with no + ExcludeCIDRs is equivalent. Overlaps are allowed between ToCIDR and + ToCIDRSet. 
+ + Example: + Any endpoint with the label "app=database-proxy" is allowed to + initiate connections to 10.2.3.0/24 except from IPs in subnet 10.2.3.0/28. + items: + description: |- + CIDRRule is a rule that specifies a CIDR prefix to/from which outside + communication is allowed, along with an optional list of subnets within that + CIDR prefix to/from which outside communication is not allowed. + oneOf: + - properties: + cidr: {} + required: + - cidr + - properties: + cidrGroupRef: {} + required: + - cidrGroupRef + - properties: + cidrGroupSelector: {} + required: + - cidrGroupSelector + properties: + cidr: + description: CIDR is a CIDR prefix / IP Block. + format: cidr + type: string + cidrGroupRef: + description: |- + CIDRGroupRef is a reference to a CiliumCIDRGroup object. + A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to + the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + connections from. + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + cidrGroupSelector: + description: |- + CIDRGroupSelector selects CiliumCIDRGroups by their labels, + rather than by name. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + except: + description: |- + ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule + is not allowed to initiate connections to. These CIDR prefixes should be + contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not + supported yet. + These exceptions are only applied to the Cidr in this CIDRRule, and do not + apply to any other CIDR prefixes in any other CIDRRules. + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + type: object + type: array + toEndpoints: + description: |- + ToEndpoints is a list of endpoints identified by an EndpointSelector to + which the endpoints subject to the rule are allowed to communicate. + + Example: + Any endpoint with the label "role=frontend" can communicate with any + endpoint carrying the label "role=backend". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toEntities: + description: |- + ToEntities is a list of special entities to which the endpoint subject + to the rule is allowed to initiate connections. Supported entities are + `world`, `cluster`,`host`,`remote-node`,`kube-apiserver`, `init`, + `health`,`unmanaged` and `all`. + items: + description: |- + Entity specifies the class of receiver/sender endpoints that do not have + individual identities. Entities are used to describe "outside of cluster", + "host", etc. 
+ enum: + - all + - world + - cluster + - host + - init + - ingress + - unmanaged + - remote-node + - health + - none + - kube-apiserver + type: string + type: array + toGroups: + description: |- + ToGroups is a directive that allows the integration with multiple outside + providers. Currently, only AWS is supported, and the rule can select by + multiple sub directives: + + Example: + toGroups: + - aws: + securityGroupsIds: + - 'sg-XXXXXXXXXXXXX' + items: + description: |- + Groups structure to store all kinds of new integrations that needs a new + derivative policy. + properties: + aws: + description: AWSGroup is an structure that can be used + to whitelisting information from AWS integration + properties: + labels: + additionalProperties: + type: string + type: object + region: + type: string + securityGroupsIds: + items: + type: string + type: array + securityGroupsNames: + items: + type: string + type: array + type: object + type: object + type: array + toNodes: + description: |- + ToNodes is a list of nodes identified by an + EndpointSelector to which endpoints subject to the rule is allowed to communicate. + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toPorts: + description: |- + ToPorts is a list of destination ports identified by port number and + protocol which the endpoint subject to the rule is not allowed to connect + to. + + Example: + Any endpoint with the label "role=frontend" is not allowed to initiate + connections to destination port 8080/tcp + items: + description: |- + PortDenyRule is a list of ports/protocol that should be used for deny + policies. This structure lacks the L7Rules since it's not supported in deny + policies. + properties: + ports: + description: Ports is a list of L4 port/protocol + items: + description: PortProtocol specifies an L4 port with + an optional transport protocol + properties: + endPort: + description: EndPort can only be an L4 port number. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + description: |- + Port can be an L4 port number, or a name in the form of "http" + or "http-8080". 
+ pattern: ^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$ + type: string + protocol: + description: |- + Protocol is the L4 protocol. If omitted or empty, any protocol + matches. Accepted values: "TCP", "UDP", "SCTP", "ANY" + + Matching on ICMP is not supported. + + Named port specified for a container may narrow this down, but may not + contradict this. + enum: + - TCP + - UDP + - SCTP + - ANY + type: string + required: + - port + type: object + type: array + type: object + type: array + toRequires: + description: |- + ToRequires is a list of additional constraints which must be met + in order for the selected endpoints to be able to connect to other + endpoints. These additional constraints do not by themselves grant access + privileges and must always be accompanied with at least one matching + ToEndpoints. + + Example: + Any Endpoint with the label "team=A" requires any endpoint to which it + communicates to also carry the label "team=A". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch.
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toServices: + description: |- + ToServices is a list of services to which the endpoint subject + to the rule is allowed to initiate connections. + Currently Cilium only supports toServices for K8s services. + items: + description: |- + Service selects policy targets that are bundled as part of a + logical load-balanced service. + + Currently only Kubernetes-based Services are supported. + properties: + k8sService: + description: K8sService selects service by name and namespace + pair + properties: + namespace: + type: string + serviceName: + type: string + type: object + k8sServiceSelector: + description: K8sServiceSelector selects services by k8s + labels and namespace + properties: + namespace: + type: string + selector: + description: ServiceSelector is a label selector for + k8s services + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the + value from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - selector + type: object + type: object + type: array + type: object + type: array + enableDefaultDeny: + description: |- + EnableDefaultDeny determines whether this policy configures the + subject endpoint(s) to have a default deny mode. If enabled, + this causes all traffic not explicitly allowed by a network policy + to be dropped. + + If not specified, the default is true for each traffic direction + that has rules, and false otherwise. For example, if a policy + only has Ingress or IngressDeny rules, then the default for + ingress is true and egress is false. + + If multiple policies apply to an endpoint, that endpoint's default deny + will be enabled if any policy requests it. 
+ + This is useful for creating broad-based network policies that will not + cause endpoints to enter default-deny mode. + properties: + egress: + description: |- + Whether or not the endpoint should have a default-deny rule applied + to egress traffic. + type: boolean + ingress: + description: |- + Whether or not the endpoint should have a default-deny rule applied + to ingress traffic. + type: boolean + type: object + endpointSelector: + description: |- + EndpointSelector selects all endpoints which should be subject to + this rule. EndpointSelector and NodeSelector cannot be both empty and + are mutually exclusive. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from the + MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + ingress: + description: |- + Ingress is a list of IngressRule which are enforced at ingress. + If omitted or empty, this rule does not apply at ingress. + items: + description: |- + IngressRule contains all rule types which can be applied at ingress, + i.e. network traffic that originates outside of the endpoint and + is entering the endpoint selected by the endpointSelector. + + - All members of this structure are optional. If omitted or empty, the + member will have no effect on the rule. + + - If multiple members are set, all of them need to match in order for + the rule to take effect. The exception to this rule is FromRequires field; + the effects of any Requires field in any rule will apply to all other + rules as well. + + - FromEndpoints, FromCIDR, FromCIDRSet and FromEntities are mutually + exclusive. Only one of these members may be present within an individual + rule. + properties: + authentication: + description: Authentication is the required authentication type + for the allowed traffic, if any. + properties: + mode: + description: Mode is the required authentication mode for + the allowed traffic, if any. + enum: + - disabled + - required + - test-always-fail + type: string + required: + - mode + type: object + fromCIDR: + description: |- + FromCIDR is a list of IP blocks which the endpoint subject to the + rule is allowed to receive connections from. Only connections which + do *not* originate from the cluster or from the local host are subject + to CIDR rules. In order to allow in-cluster connectivity, use the + FromEndpoints field. This will match on the source IP address of + incoming connections. 
Adding a prefix into FromCIDR or into + FromCIDRSet with no ExcludeCIDRs is equivalent. Overlaps are + allowed between FromCIDR and FromCIDRSet. + + Example: + Any endpoint with the label "app=my-legacy-pet" is allowed to receive + connections from 10.3.9.1 + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + fromCIDRSet: + description: |- + FromCIDRSet is a list of IP blocks which the endpoint subject to the + rule is allowed to receive connections from in addition to FromEndpoints, + along with a list of subnets contained within their corresponding IP block + from which traffic should not be allowed. + This will match on the source IP address of incoming connections. Adding + a prefix into FromCIDR or into FromCIDRSet with no ExcludeCIDRs is + equivalent. Overlaps are allowed between FromCIDR and FromCIDRSet. + + Example: + Any endpoint with the label "app=my-legacy-pet" is allowed to receive + connections from 10.0.0.0/8 except from IPs in subnet 10.96.0.0/12. + items: + description: |- + CIDRRule is a rule that specifies a CIDR prefix to/from which outside + communication is allowed, along with an optional list of subnets within that + CIDR prefix to/from which outside communication is not allowed. + oneOf: + - properties: + cidr: {} + required: + - cidr + - properties: + cidrGroupRef: {} + required: + - cidrGroupRef + - properties: + cidrGroupSelector: {} + required: + - cidrGroupSelector + properties: + cidr: + description: CIDR is a CIDR prefix / IP Block. + format: cidr + type: string + cidrGroupRef: + description: |- + CIDRGroupRef is a reference to a CiliumCIDRGroup object. + A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to + the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + connections from. 
+ maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + cidrGroupSelector: + description: |- + CIDRGroupSelector selects CiliumCIDRGroups by their labels, + rather than by name. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + except: + description: |- + ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule + is not allowed to initiate connections to. 
These CIDR prefixes should be + contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not + supported yet. + These exceptions are only applied to the Cidr in this CIDRRule, and do not + apply to any other CIDR prefixes in any other CIDRRules. + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + type: object + type: array + fromEndpoints: + description: |- + FromEndpoints is a list of endpoints identified by an + EndpointSelector which are allowed to communicate with the endpoint + subject to the rule. + + Example: + Any endpoint with the label "role=backend" can be consumed by any + endpoint carrying the label "role=frontend". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. 
+ maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + fromEntities: + description: |- + FromEntities is a list of special entities which the endpoint subject + to the rule is allowed to receive connections from. Supported entities are + `world`, `cluster` and `host` + items: + description: |- + Entity specifies the class of receiver/sender endpoints that do not have + individual identities. Entities are used to describe "outside of cluster", + "host", etc. + enum: + - all + - world + - cluster + - host + - init + - ingress + - unmanaged + - remote-node + - health + - none + - kube-apiserver + type: string + type: array + fromGroups: + description: |- + FromGroups is a directive that allows the integration with multiple outside + providers. Currently, only AWS is supported, and the rule can select by + multiple sub directives: + + Example: + FromGroups: + - aws: + securityGroupsIds: + - 'sg-XXXXXXXXXXXXX' + items: + description: |- + Groups structure to store all kinds of new integrations that needs a new + derivative policy. 
+ properties: + aws: + description: AWSGroup is an structure that can be used + to whitelisting information from AWS integration + properties: + labels: + additionalProperties: + type: string + type: object + region: + type: string + securityGroupsIds: + items: + type: string + type: array + securityGroupsNames: + items: + type: string + type: array + type: object + type: object + type: array + fromNodes: + description: |- + FromNodes is a list of nodes identified by an + EndpointSelector which are allowed to communicate with the endpoint + subject to the rule. + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + fromRequires: + description: |- + FromRequires is a list of additional constraints which must be met + in order for the selected endpoints to be reachable. These + additional constraints do no by itself grant access privileges and + must always be accompanied with at least one matching FromEndpoints. + + Example: + Any Endpoint with the label "team=A" requires consuming endpoint + to also carry the label "team=A". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. 
+ maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + icmps: + description: |- + ICMPs is a list of ICMP rule identified by type number + which the endpoint subject to the rule is allowed to + receive connections on. + + Example: + Any endpoint with the label "app=httpd" can only accept incoming + type 8 ICMP connections. + items: + description: ICMPRule is a list of ICMP fields. + properties: + fields: + description: Fields is a list of ICMP fields. + items: + description: ICMPField is a ICMP field. + properties: + family: + default: IPv4 + description: |- + Family is a IP address version. + Currently, we support `IPv4` and `IPv6`. + `IPv4` is set as default. 
+ enum: + - IPv4 + - IPv6 + type: string + type: + anyOf: + - type: integer + - type: string + description: "Type is a ICMP-type.\nIt should be + an 8bit code (0-255), or it's CamelCase name (for + example, \"EchoReply\").\nAllowed ICMP types are:\n + \ Ipv4: EchoReply | DestinationUnreachable | + Redirect | Echo | EchoRequest |\n\t\t RouterAdvertisement + | RouterSelection | TimeExceeded | ParameterProblem + |\n\t\t\t Timestamp | TimestampReply | Photuris + | ExtendedEcho Request | ExtendedEcho Reply\n + \ Ipv6: DestinationUnreachable | PacketTooBig + | TimeExceeded | ParameterProblem |\n\t\t\t EchoRequest + | EchoReply | MulticastListenerQuery| MulticastListenerReport + |\n\t\t\t MulticastListenerDone | RouterSolicitation + | RouterAdvertisement | NeighborSolicitation |\n\t\t\t + NeighborAdvertisement | RedirectMessage | RouterRenumbering + | ICMPNodeInformationQuery |\n\t\t\t ICMPNodeInformationResponse + | InverseNeighborDiscoverySolicitation | InverseNeighborDiscoveryAdvertisement + |\n\t\t\t HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply + | MobilePrefixSolicitation |\n\t\t\t MobilePrefixAdvertisement + | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix + |\n\t\t\t ExtendedEchoRequest | ExtendedEchoReply" + pattern: ^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho + 
Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$ + x-kubernetes-int-or-string: true + required: + - type + type: object + maxItems: 40 + type: array + type: object + type: array + toPorts: + description: |- + ToPorts is a list of destination ports identified by port number and + protocol which the endpoint subject to the rule is allowed to + receive connections on. + + Example: + Any endpoint with the label "app=httpd" can only accept incoming + connections on port 80/tcp. + items: + description: |- + PortRule is a list of ports/protocol combinations with optional Layer 7 + rules which must be met. + properties: + listener: + description: |- + listener specifies the name of a custom Envoy listener to which this traffic should be + redirected to. + properties: + envoyConfig: + description: |- + EnvoyConfig is a reference to the CEC or CCEC resource in which + the listener is defined. + properties: + kind: + description: |- + Kind is the resource type being referred to. Defaults to CiliumEnvoyConfig or + CiliumClusterwideEnvoyConfig for CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy, + respectively. The only case this is currently explicitly needed is when referring to a + CiliumClusterwideEnvoyConfig from CiliumNetworkPolicy, as using a namespaced listener + from a cluster scoped policy is not allowed. 
+ enum: + - CiliumEnvoyConfig + - CiliumClusterwideEnvoyConfig + type: string + name: + description: |- + Name is the resource name of the CiliumEnvoyConfig or CiliumClusterwideEnvoyConfig where + the listener is defined in. + minLength: 1 + type: string + required: + - name + type: object + name: + description: Name is the name of the listener. + minLength: 1 + type: string + priority: + description: |- + Priority for this Listener that is used when multiple rules would apply different + listeners to a policy map entry. Behavior of this is implementation dependent. + maximum: 100 + minimum: 1 + type: integer + required: + - envoyConfig + - name + type: object + originatingTLS: + description: |- + OriginatingTLS is the TLS context for the connections originated by + the L7 proxy. For egress policy this specifies the client-side TLS + parameters for the upstream connection originating from the L7 proxy + to the remote destination. For ingress policy this specifies the + client-side TLS parameters for the connection from the L7 proxy to + the local endpoint. + properties: + certificate: + description: |- + Certificate is the file name or k8s secret item name for the certificate + chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the + item must exist. + type: string + privateKey: + description: |- + PrivateKey is the file name or k8s secret item name for the private key + matching the certificate chain. If omitted, 'tls.key' is assumed, if it + exists. If given, the item must exist. + type: string + secret: + description: |- + Secret is the secret that contains the certificates and private key for + the TLS context. + By default, Cilium will search in this secret for the following items: + - 'ca.crt' - Which represents the trusted CA to verify remote source. + - 'tls.crt' - Which represents the public key certificate. + - 'tls.key' - Which represents the private key matching the public key + certificate. 
+ properties: + name: + description: Name is the name of the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + trustedCA: + description: |- + TrustedCA is the file name or k8s secret item name for the trusted CA. + If omitted, 'ca.crt' is assumed, if it exists. If given, the item must + exist. + type: string + required: + - secret + type: object + ports: + description: Ports is a list of L4 port/protocol + items: + description: PortProtocol specifies an L4 port with + an optional transport protocol + properties: + endPort: + description: EndPort can only be an L4 port number. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + description: |- + Port can be an L4 port number, or a name in the form of "http" + or "http-8080". + pattern: ^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$ + type: string + protocol: + description: |- + Protocol is the L4 protocol. If omitted or empty, any protocol + matches. Accepted values: "TCP", "UDP", "SCTP", "ANY" + + Matching on ICMP is not supported. + + Named port specified for a container may narrow this down, but may not + contradict this. + enum: + - TCP + - UDP + - SCTP + - ANY + type: string + required: + - port + type: object + maxItems: 40 + type: array + rules: + description: |- + Rules is a list of additional port level rules which must be met in + order for the PortRule to allow the traffic. If omitted or empty, + no layer 7 rules are enforced. + oneOf: + - properties: + http: {} + required: + - http + - properties: + kafka: {} + required: + - kafka + - properties: + dns: {} + required: + - dns + - properties: + l7proto: {} + required: + - l7proto + properties: + dns: + description: DNS-specific rules. 
+ items: + description: PortRuleDNS is a list of allowed DNS + lookups. + oneOf: + - properties: + matchName: {} + required: + - matchName + - properties: + matchPattern: {} + required: + - matchPattern + properties: + matchName: + description: |- + MatchName matches literal DNS names. A trailing "." is automatically added + when missing. + maxLength: 255 + pattern: ^([-a-zA-Z0-9_]+[.]?)+$ + type: string + matchPattern: + description: |- + MatchPattern allows using wildcards to match DNS names. All wildcards are + case insensitive. The wildcards are: + - "*" matches 0 or more DNS valid characters, and may occur anywhere in + the pattern. As a special case a "*" as the leftmost character, without a + following "." matches all subdomains as well as the name to the right. + A trailing "." is automatically added when missing. + + Examples: + `*.cilium.io` matches subomains of cilium at that level + www.cilium.io and blog.cilium.io match, cilium.io and google.com do not + `*cilium.io` matches cilium.io and all subdomains ends with "cilium.io" + except those containing "." separator, subcilium.io and sub-cilium.io match, + www.cilium.io and blog.cilium.io does not + sub*.cilium.io matches subdomains of cilium where the subdomain component + begins with "sub" + sub.cilium.io and subdomain.cilium.io match, www.cilium.io, + blog.cilium.io, cilium.io and google.com do not + maxLength: 255 + pattern: ^([-a-zA-Z0-9_*]+[.]?)+$ + type: string + type: object + type: array + http: + description: HTTP specific rules. + items: + description: |- + PortRuleHTTP is a list of HTTP protocol constraints. All fields are + optional, if all fields are empty or missing, the rule does not have any + effect. + + All fields of this type are extended POSIX regex as defined by IEEE Std + 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) + matched against the path of an incoming request. 
Currently it can contain + characters disallowed from the conventional "path" part of a URL as defined + by RFC 3986. + properties: + headerMatches: + description: |- + HeaderMatches is a list of HTTP headers which must be + present and match against the given values. Mismatch field can be used + to specify what to do when there is no match. + items: + description: |- + HeaderMatch extends the HeaderValue for matching requirement of a + named header field against an immediate string, a secret value, or + a regex. If none of the optional fields is present, then the + header value is not matched, only presence of the header is enough. + properties: + mismatch: + description: |- + Mismatch identifies what to do in case there is no match. The default is + to drop the request. Otherwise the overall rule is still considered as + matching, but the mismatches are logged in the access log. + enum: + - LOG + - ADD + - DELETE + - REPLACE + type: string + name: + description: Name identifies the header. + minLength: 1 + type: string + secret: + description: |- + Secret refers to a secret that contains the value to be matched against. + The secret must only contain one entry. If the referred secret does not + exist, and there is no "Value" specified, the match will fail. + properties: + name: + description: Name is the name of the + secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + value: + description: |- + Value matches the exact value of the header. Can be specified either + alone or together with "Secret"; will be used as the header value if the + secret can not be found in the latter case. + type: string + required: + - name + type: object + type: array + headers: + description: |- + Headers is a list of HTTP headers which must be present in the + request. 
If omitted or empty, requests are allowed regardless of + headers present. + items: + type: string + type: array + host: + description: |- + Host is an extended POSIX regex matched against the host header of a + request. Examples: + + - foo.bar.com will match the host fooXbar.com or foo-bar.com + - foo\.bar\.com will only match the host foo.bar.com + + If omitted or empty, the value of the host header is ignored. + format: idn-hostname + type: string + method: + description: |- + Method is an extended POSIX regex matched against the method of a + request, e.g. "GET", "POST", "PUT", "PATCH", "DELETE", ... + + If omitted or empty, all methods are allowed. + type: string + path: + description: |- + Path is an extended POSIX regex matched against the path of a + request. Currently it can contain characters disallowed from the + conventional "path" part of a URL as defined by RFC 3986. + + If omitted or empty, all paths are all allowed. + type: string + type: object + type: array + kafka: + description: Kafka-specific rules. + items: + description: |- + PortRule is a list of Kafka protocol constraints. All fields are + optional, if all fields are empty or missing, the rule will match all + Kafka messages. + properties: + apiKey: + description: |- + APIKey is a case-insensitive string matched against the key of a + request, e.g. "produce", "fetch", "createtopic", "deletetopic", et al + Reference: https://kafka.apache.org/protocol#protocol_api_keys + + If omitted or empty, and if Role is not specified, then all keys are allowed. + type: string + apiVersion: + description: |- + APIVersion is the version matched against the api version of the + Kafka message. If set, it has to be a string representing a positive + integer. + + If omitted or empty, all versions are allowed. + type: string + clientID: + description: |- + ClientID is the client identifier as provided in the request. 
+ + From Kafka protocol documentation: + This is a user supplied identifier for the client application. The + user can use any identifier they like and it will be used when + logging errors, monitoring aggregates, etc. For example, one might + want to monitor not just the requests per second overall, but the + number coming from each client application (each of which could + reside on multiple servers). This id acts as a logical grouping + across all requests from a particular client. + + If omitted or empty, all client identifiers are allowed. + type: string + role: + description: |- + Role is a case-insensitive string and describes a group of API keys + necessary to perform certain higher-level Kafka operations such as "produce" + or "consume". A Role automatically expands into all APIKeys required + to perform the specified higher-level operation. + + The following values are supported: + - "produce": Allow producing to the topics specified in the rule + - "consume": Allow consuming from the topics specified in the rule + + This field is incompatible with the APIKey field, i.e APIKey and Role + cannot both be specified in the same rule. + + If omitted or empty, and if APIKey is not specified, then all keys are + allowed. + enum: + - produce + - consume + type: string + topic: + description: |- + Topic is the topic name contained in the message. If a Kafka request + contains multiple topics, then all topics must be allowed or the + message will be rejected. + + This constraint is ignored if the matched request message type + doesn't contain any topic. Maximum size of Topic can be 249 + characters as per recent Kafka spec and allowed characters are + a-z, A-Z, 0-9, -, . and _. + + Older Kafka versions had longer topic lengths of 255, but in Kafka 0.10 + version the length was changed from 255 to 249. For compatibility + reasons we are using 255. + + If omitted or empty, all topics are allowed. 
+ maxLength: 255 + type: string + type: object + type: array + l7: + description: Key-value pair rules. + items: + additionalProperties: + type: string + description: |- + PortRuleL7 is a list of key-value pairs interpreted by a L7 protocol as + protocol constraints. All fields are optional, if all fields are empty or + missing, the rule does not have any effect. + type: object + type: array + l7proto: + description: Name of the L7 protocol for which the + Key-value pair rules apply. + type: string + type: object + serverNames: + description: |- + ServerNames is a list of allowed TLS SNI values. If not empty, then + TLS must be present and one of the provided SNIs must be indicated in the + TLS handshake. + items: + type: string + type: array + terminatingTLS: + description: |- + TerminatingTLS is the TLS context for the connection terminated by + the L7 proxy. For egress policy this specifies the server-side TLS + parameters to be applied on the connections originated from the local + endpoint and terminated by the L7 proxy. For ingress policy this specifies + the server-side TLS parameters to be applied on the connections + originated from a remote source and terminated by the L7 proxy. + properties: + certificate: + description: |- + Certificate is the file name or k8s secret item name for the certificate + chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the + item must exist. + type: string + privateKey: + description: |- + PrivateKey is the file name or k8s secret item name for the private key + matching the certificate chain. If omitted, 'tls.key' is assumed, if it + exists. If given, the item must exist. + type: string + secret: + description: |- + Secret is the secret that contains the certificates and private key for + the TLS context. + By default, Cilium will search in this secret for the following items: + - 'ca.crt' - Which represents the trusted CA to verify remote source. + - 'tls.crt' - Which represents the public key certificate. 
+ - 'tls.key' - Which represents the private key matching the public key + certificate. + properties: + name: + description: Name is the name of the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + trustedCA: + description: |- + TrustedCA is the file name or k8s secret item name for the trusted CA. + If omitted, 'ca.crt' is assumed, if it exists. If given, the item must + exist. + type: string + required: + - secret + type: object + type: object + type: array + type: object + type: array + ingressDeny: + description: |- + IngressDeny is a list of IngressDenyRule which are enforced at ingress. + Any rule inserted here will be denied regardless of the allowed ingress + rules in the 'ingress' field. + If omitted or empty, this rule does not apply at ingress. + items: + description: |- + IngressDenyRule contains all rule types which can be applied at ingress, + i.e. network traffic that originates outside of the endpoint and + is entering the endpoint selected by the endpointSelector. + + - All members of this structure are optional. If omitted or empty, the + member will have no effect on the rule. + + - If multiple members are set, all of them need to match in order for + the rule to take effect. The exception to this rule is FromRequires field; + the effects of any Requires field in any rule will apply to all other + rules as well. + + - FromEndpoints, FromCIDR, FromCIDRSet, FromGroups and FromEntities are mutually + exclusive. Only one of these members may be present within an individual + rule. + properties: + fromCIDR: + description: |- + FromCIDR is a list of IP blocks which the endpoint subject to the + rule is allowed to receive connections from. Only connections which + do *not* originate from the cluster or from the local host are subject + to CIDR rules. 
In order to allow in-cluster connectivity, use the + FromEndpoints field. This will match on the source IP address of + incoming connections. Adding a prefix into FromCIDR or into + FromCIDRSet with no ExcludeCIDRs is equivalent. Overlaps are + allowed between FromCIDR and FromCIDRSet. + + Example: + Any endpoint with the label "app=my-legacy-pet" is allowed to receive + connections from 10.3.9.1 + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + fromCIDRSet: + description: |- + FromCIDRSet is a list of IP blocks which the endpoint subject to the + rule is allowed to receive connections from in addition to FromEndpoints, + along with a list of subnets contained within their corresponding IP block + from which traffic should not be allowed. + This will match on the source IP address of incoming connections. Adding + a prefix into FromCIDR or into FromCIDRSet with no ExcludeCIDRs is + equivalent. Overlaps are allowed between FromCIDR and FromCIDRSet. + + Example: + Any endpoint with the label "app=my-legacy-pet" is allowed to receive + connections from 10.0.0.0/8 except from IPs in subnet 10.96.0.0/12. + items: + description: |- + CIDRRule is a rule that specifies a CIDR prefix to/from which outside + communication is allowed, along with an optional list of subnets within that + CIDR prefix to/from which outside communication is not allowed. + oneOf: + - properties: + cidr: {} + required: + - cidr + - properties: + cidrGroupRef: {} + required: + - cidrGroupRef + - properties: + cidrGroupSelector: {} + required: + - cidrGroupSelector + properties: + cidr: + description: CIDR is a CIDR prefix / IP Block. + format: cidr + type: string + cidrGroupRef: + description: |- + CIDRGroupRef is a reference to a CiliumCIDRGroup object. 
+ A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to + the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + connections from. + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + cidrGroupSelector: + description: |- + CIDRGroupSelector selects CiliumCIDRGroups by their labels, + rather than by name. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + except: + description: |- + ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule + is not allowed to initiate connections to. These CIDR prefixes should be + contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not + supported yet. + These exceptions are only applied to the Cidr in this CIDRRule, and do not + apply to any other CIDR prefixes in any other CIDRRules. + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + type: object + type: array + fromEndpoints: + description: |- + FromEndpoints is a list of endpoints identified by an + EndpointSelector which are allowed to communicate with the endpoint + subject to the rule. + + Example: + Any endpoint with the label "role=backend" can be consumed by any + endpoint carrying the label "role=frontend". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + fromEntities: + description: |- + FromEntities is a list of special entities which the endpoint subject + to the rule is allowed to receive connections from. Supported entities are + `world`, `cluster` and `host` + items: + description: |- + Entity specifies the class of receiver/sender endpoints that do not have + individual identities. Entities are used to describe "outside of cluster", + "host", etc. + enum: + - all + - world + - cluster + - host + - init + - ingress + - unmanaged + - remote-node + - health + - none + - kube-apiserver + type: string + type: array + fromGroups: + description: |- + FromGroups is a directive that allows the integration with multiple outside + providers. Currently, only AWS is supported, and the rule can select by + multiple sub directives: + + Example: + FromGroups: + - aws: + securityGroupsIds: + - 'sg-XXXXXXXXXXXXX' + items: + description: |- + Groups structure to store all kinds of new integrations that needs a new + derivative policy. 
+ properties: + aws: + description: AWSGroup is an structure that can be used + to whitelisting information from AWS integration + properties: + labels: + additionalProperties: + type: string + type: object + region: + type: string + securityGroupsIds: + items: + type: string + type: array + securityGroupsNames: + items: + type: string + type: array + type: object + type: object + type: array + fromNodes: + description: |- + FromNodes is a list of nodes identified by an + EndpointSelector which are allowed to communicate with the endpoint + subject to the rule. + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + fromRequires: + description: |- + FromRequires is a list of additional constraints which must be met + in order for the selected endpoints to be reachable. These + additional constraints do no by itself grant access privileges and + must always be accompanied with at least one matching FromEndpoints. + + Example: + Any Endpoint with the label "team=A" requires consuming endpoint + to also carry the label "team=A". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from + the MatchLabels {key,value} pair. 
+ maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + icmps: + description: |- + ICMPs is a list of ICMP rule identified by type number + which the endpoint subject to the rule is not allowed to + receive connections on. + + Example: + Any endpoint with the label "app=httpd" can not accept incoming + type 8 ICMP connections. + items: + description: ICMPRule is a list of ICMP fields. + properties: + fields: + description: Fields is a list of ICMP fields. + items: + description: ICMPField is a ICMP field. + properties: + family: + default: IPv4 + description: |- + Family is a IP address version. + Currently, we support `IPv4` and `IPv6`. + `IPv4` is set as default. 
+ enum: + - IPv4 + - IPv6 + type: string + type: + anyOf: + - type: integer + - type: string + description: "Type is a ICMP-type.\nIt should be + an 8bit code (0-255), or it's CamelCase name (for + example, \"EchoReply\").\nAllowed ICMP types are:\n + \ Ipv4: EchoReply | DestinationUnreachable | + Redirect | Echo | EchoRequest |\n\t\t RouterAdvertisement + | RouterSelection | TimeExceeded | ParameterProblem + |\n\t\t\t Timestamp | TimestampReply | Photuris + | ExtendedEcho Request | ExtendedEcho Reply\n + \ Ipv6: DestinationUnreachable | PacketTooBig + | TimeExceeded | ParameterProblem |\n\t\t\t EchoRequest + | EchoReply | MulticastListenerQuery| MulticastListenerReport + |\n\t\t\t MulticastListenerDone | RouterSolicitation + | RouterAdvertisement | NeighborSolicitation |\n\t\t\t + NeighborAdvertisement | RedirectMessage | RouterRenumbering + | ICMPNodeInformationQuery |\n\t\t\t ICMPNodeInformationResponse + | InverseNeighborDiscoverySolicitation | InverseNeighborDiscoveryAdvertisement + |\n\t\t\t HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply + | MobilePrefixSolicitation |\n\t\t\t MobilePrefixAdvertisement + | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix + |\n\t\t\t ExtendedEchoRequest | ExtendedEchoReply" + pattern: ^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho + 
Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$ + x-kubernetes-int-or-string: true + required: + - type + type: object + maxItems: 40 + type: array + type: object + type: array + toPorts: + description: |- + ToPorts is a list of destination ports identified by port number and + protocol which the endpoint subject to the rule is not allowed to + receive connections on. + + Example: + Any endpoint with the label "app=httpd" can not accept incoming + connections on port 80/tcp. + items: + description: |- + PortDenyRule is a list of ports/protocol that should be used for deny + policies. This structure lacks the L7Rules since it's not supported in deny + policies. + properties: + ports: + description: Ports is a list of L4 port/protocol + items: + description: PortProtocol specifies an L4 port with + an optional transport protocol + properties: + endPort: + description: EndPort can only be an L4 port number. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + description: |- + Port can be an L4 port number, or a name in the form of "http" + or "http-8080". + pattern: ^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$ + type: string + protocol: + description: |- + Protocol is the L4 protocol. If omitted or empty, any protocol + matches. Accepted values: "TCP", "UDP", "SCTP", "ANY" + + Matching on ICMP is not supported. 
+ + Named port specified for a container may narrow this down, but may not + contradict this. + enum: + - TCP + - UDP + - SCTP + - ANY + type: string + required: + - port + type: object + type: array + type: object + type: array + type: object + type: array + labels: + description: |- + Labels is a list of optional strings which can be used to + re-identify the rule or to store metadata. It is possible to lookup + or delete strings based on labels. Labels are not required to be + unique, multiple rules can have overlapping or identical labels. + items: + description: Label is the Cilium's representation of a container + label. + properties: + key: + type: string + source: + description: 'Source can be one of the above values (e.g.: LabelSourceContainer).' + type: string + value: + type: string + required: + - key + type: object + type: array + nodeSelector: + description: |- + NodeSelector selects all nodes which should be subject to this rule. + EndpointSelector and NodeSelector cannot be both empty and are mutually + exclusive. Can only be used in CiliumClusterwideNetworkPolicies. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from the + MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + specs: + description: Specs is a list of desired Cilium specific rule specification. + items: + anyOf: + - properties: + ingress: {} + required: + - ingress + - properties: + ingressDeny: {} + required: + - ingressDeny + - properties: + egress: {} + required: + - egress + - properties: + egressDeny: {} + required: + - egressDeny + description: |- + Rule is a policy rule which must be applied to all endpoints which match the + labels contained in the endpointSelector + + Each rule is split into an ingress section which contains all rules + applicable at ingress, and an egress section applicable at egress. For rule + types such as `L4Rule` and `CIDR` which can be applied at both ingress and + egress, both ingress and egress side have to either specifically allow the + connection or one side has to be omitted. + + Either ingress, egress, or both can be provided. If both ingress and egress + are omitted, the rule has no effect. 
+ oneOf: + - properties: + endpointSelector: {} + required: + - endpointSelector + - properties: + nodeSelector: {} + required: + - nodeSelector + properties: + description: + description: |- + Description is a free form string, it can be used by the creator of + the rule to store human readable explanation of the purpose of this + rule. Rules cannot be identified by comment. + type: string + egress: + description: |- + Egress is a list of EgressRule which are enforced at egress. + If omitted or empty, this rule does not apply at egress. + items: + description: |- + EgressRule contains all rule types which can be applied at egress, i.e. + network traffic that originates inside the endpoint and exits the endpoint + selected by the endpointSelector. + + - All members of this structure are optional. If omitted or empty, the + member will have no effect on the rule. + + - If multiple members of the structure are specified, then all members + must match in order for the rule to take effect. The exception to this + rule is the ToRequires member; the effects of any Requires field in any + rule will apply to all other rules as well. + + - ToEndpoints, ToCIDR, ToCIDRSet, ToEntities, ToServices and ToGroups are + mutually exclusive. Only one of these members may be present within an + individual rule. + properties: + authentication: + description: Authentication is the required authentication + type for the allowed traffic, if any. + properties: + mode: + description: Mode is the required authentication mode + for the allowed traffic, if any. + enum: + - disabled + - required + - test-always-fail + type: string + required: + - mode + type: object + icmps: + description: |- + ICMPs is a list of ICMP rule identified by type number + which the endpoint subject to the rule is allowed to connect to. + + Example: + Any endpoint with the label "app=httpd" is allowed to initiate + type 8 ICMP connections. + items: + description: ICMPRule is a list of ICMP fields. 
+ properties: + fields: + description: Fields is a list of ICMP fields. + items: + description: ICMPField is a ICMP field. + properties: + family: + default: IPv4 + description: |- + Family is a IP address version. + Currently, we support `IPv4` and `IPv6`. + `IPv4` is set as default. + enum: + - IPv4 + - IPv6 + type: string + type: + anyOf: + - type: integer + - type: string + description: "Type is a ICMP-type.\nIt should + be an 8bit code (0-255), or it's CamelCase name + (for example, \"EchoReply\").\nAllowed ICMP + types are:\n Ipv4: EchoReply | DestinationUnreachable + | Redirect | Echo | EchoRequest |\n\t\t RouterAdvertisement + | RouterSelection | TimeExceeded | ParameterProblem + |\n\t\t\t Timestamp | TimestampReply | Photuris + | ExtendedEcho Request | ExtendedEcho Reply\n + \ Ipv6: DestinationUnreachable | PacketTooBig + | TimeExceeded | ParameterProblem |\n\t\t\t + EchoRequest | EchoReply | MulticastListenerQuery| + MulticastListenerReport |\n\t\t\t MulticastListenerDone + | RouterSolicitation | RouterAdvertisement | + NeighborSolicitation |\n\t\t\t NeighborAdvertisement + | RedirectMessage | RouterRenumbering | ICMPNodeInformationQuery + |\n\t\t\t ICMPNodeInformationResponse | InverseNeighborDiscoverySolicitation + | InverseNeighborDiscoveryAdvertisement |\n\t\t\t + HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply + | MobilePrefixSolicitation |\n\t\t\t MobilePrefixAdvertisement + | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix + |\n\t\t\t ExtendedEchoRequest | ExtendedEchoReply" + pattern: ^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho + 
Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$ + x-kubernetes-int-or-string: true + required: + - type + type: object + maxItems: 40 + type: array + type: object + type: array + toCIDR: + description: |- + ToCIDR is a list of IP blocks which the endpoint subject to the rule + is allowed to initiate connections. Only connections destined for + outside of the cluster and not targeting the host will be subject + to CIDR rules. This will match on the destination IP address of + outgoing connections. Adding a prefix into ToCIDR or into ToCIDRSet + with no ExcludeCIDRs is equivalent. Overlaps are allowed between + ToCIDR and ToCIDRSet. + + Example: + Any endpoint with the label "app=database-proxy" is allowed to + initiate connections to 10.2.3.0/24 + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + toCIDRSet: + description: |- + ToCIDRSet is a list of IP blocks which the endpoint subject to the rule + is allowed to initiate connections to in addition to connections + which are allowed via ToEndpoints, along with a list of subnets contained + within their corresponding IP block to which traffic should not be + allowed. This will match on the destination IP address of outgoing + connections. Adding a prefix into ToCIDR or into ToCIDRSet with no + ExcludeCIDRs is equivalent. Overlaps are allowed between ToCIDR and + ToCIDRSet. 
+ + Example: + Any endpoint with the label "app=database-proxy" is allowed to + initiate connections to 10.2.3.0/24 except from IPs in subnet 10.2.3.0/28. + items: + description: |- + CIDRRule is a rule that specifies a CIDR prefix to/from which outside + communication is allowed, along with an optional list of subnets within that + CIDR prefix to/from which outside communication is not allowed. + oneOf: + - properties: + cidr: {} + required: + - cidr + - properties: + cidrGroupRef: {} + required: + - cidrGroupRef + - properties: + cidrGroupSelector: {} + required: + - cidrGroupSelector + properties: + cidr: + description: CIDR is a CIDR prefix / IP Block. + format: cidr + type: string + cidrGroupRef: + description: |- + CIDRGroupRef is a reference to a CiliumCIDRGroup object. + A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to + the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + connections from. + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + cidrGroupSelector: + description: |- + CIDRGroupSelector selects CiliumCIDRGroups by their labels, + rather than by name. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + except: + description: |- + ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule + is not allowed to initiate connections to. These CIDR prefixes should be + contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not + supported yet. + These exceptions are only applied to the Cidr in this CIDRRule, and do not + apply to any other CIDR prefixes in any other CIDRRules. + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + type: object + type: array + toEndpoints: + description: |- + ToEndpoints is a list of endpoints identified by an EndpointSelector to + which the endpoints subject to the rule are allowed to communicate. + + Example: + Any endpoint with the label "role=frontend" can communicate with any + endpoint carrying the label "role=backend". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toEntities: + description: |- + ToEntities is a list of special entities to which the endpoint subject + to the rule is allowed to initiate connections. Supported entities are + `world`, `cluster`,`host`,`remote-node`,`kube-apiserver`, `init`, + `health`,`unmanaged` and `all`. + items: + description: |- + Entity specifies the class of receiver/sender endpoints that do not have + individual identities. Entities are used to describe "outside of cluster", + "host", etc. 
+ enum: + - all + - world + - cluster + - host + - init + - ingress + - unmanaged + - remote-node + - health + - none + - kube-apiserver + type: string + type: array + toFQDNs: + description: |- + ToFQDN allows whitelisting DNS names in place of IPs. The IPs that result + from DNS resolution of `ToFQDN.MatchName`s are added to the same + EgressRule object as ToCIDRSet entries, and behave accordingly. Any L4 and + L7 rules within this EgressRule will also apply to these IPs. + The DNS -> IP mapping is re-resolved periodically from within the + cilium-agent, and the IPs in the DNS response are effected in the policy + for selected pods as-is (i.e. the list of IPs is not modified in any way). + Note: An explicit rule to allow for DNS traffic is needed for the pods, as + ToFQDN counts as an egress rule and will enforce egress policy when + PolicyEnforcment=default. + Note: If the resolved IPs are IPs within the kubernetes cluster, the + ToFQDN rule will not apply to that IP. + Note: ToFQDN cannot occur in the same policy as other To* rules. + items: + oneOf: + - properties: + matchName: {} + required: + - matchName + - properties: + matchPattern: {} + required: + - matchPattern + properties: + matchName: + description: |- + MatchName matches literal DNS names. A trailing "." is automatically added + when missing. + maxLength: 255 + pattern: ^([-a-zA-Z0-9_]+[.]?)+$ + type: string + matchPattern: + description: |- + MatchPattern allows using wildcards to match DNS names. All wildcards are + case insensitive. The wildcards are: + - "*" matches 0 or more DNS valid characters, and may occur anywhere in + the pattern. As a special case a "*" as the leftmost character, without a + following "." matches all subdomains as well as the name to the right. + A trailing "." is automatically added when missing. 
+ + Examples: + `*.cilium.io` matches subomains of cilium at that level + www.cilium.io and blog.cilium.io match, cilium.io and google.com do not + `*cilium.io` matches cilium.io and all subdomains ends with "cilium.io" + except those containing "." separator, subcilium.io and sub-cilium.io match, + www.cilium.io and blog.cilium.io does not + sub*.cilium.io matches subdomains of cilium where the subdomain component + begins with "sub" + sub.cilium.io and subdomain.cilium.io match, www.cilium.io, + blog.cilium.io, cilium.io and google.com do not + maxLength: 255 + pattern: ^([-a-zA-Z0-9_*]+[.]?)+$ + type: string + type: object + type: array + toGroups: + description: |- + ToGroups is a directive that allows the integration with multiple outside + providers. Currently, only AWS is supported, and the rule can select by + multiple sub directives: + + Example: + toGroups: + - aws: + securityGroupsIds: + - 'sg-XXXXXXXXXXXXX' + items: + description: |- + Groups structure to store all kinds of new integrations that needs a new + derivative policy. + properties: + aws: + description: AWSGroup is an structure that can be used + to whitelisting information from AWS integration + properties: + labels: + additionalProperties: + type: string + type: object + region: + type: string + securityGroupsIds: + items: + type: string + type: array + securityGroupsNames: + items: + type: string + type: array + type: object + type: object + type: array + toNodes: + description: |- + ToNodes is a list of nodes identified by an + EndpointSelector to which endpoints subject to the rule is allowed to communicate. + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toPorts: + description: |- + ToPorts is a list of destination ports identified by port number and + protocol which the endpoint subject to the rule is allowed to + connect to. + + Example: + Any endpoint with the label "role=frontend" is allowed to initiate + connections to destination port 8080/tcp + items: + description: |- + PortRule is a list of ports/protocol combinations with optional Layer 7 + rules which must be met. + properties: + listener: + description: |- + listener specifies the name of a custom Envoy listener to which this traffic should be + redirected to. 
+ properties: + envoyConfig: + description: |- + EnvoyConfig is a reference to the CEC or CCEC resource in which + the listener is defined. + properties: + kind: + description: |- + Kind is the resource type being referred to. Defaults to CiliumEnvoyConfig or + CiliumClusterwideEnvoyConfig for CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy, + respectively. The only case this is currently explicitly needed is when referring to a + CiliumClusterwideEnvoyConfig from CiliumNetworkPolicy, as using a namespaced listener + from a cluster scoped policy is not allowed. + enum: + - CiliumEnvoyConfig + - CiliumClusterwideEnvoyConfig + type: string + name: + description: |- + Name is the resource name of the CiliumEnvoyConfig or CiliumClusterwideEnvoyConfig where + the listener is defined in. + minLength: 1 + type: string + required: + - name + type: object + name: + description: Name is the name of the listener. + minLength: 1 + type: string + priority: + description: |- + Priority for this Listener that is used when multiple rules would apply different + listeners to a policy map entry. Behavior of this is implementation dependent. + maximum: 100 + minimum: 1 + type: integer + required: + - envoyConfig + - name + type: object + originatingTLS: + description: |- + OriginatingTLS is the TLS context for the connections originated by + the L7 proxy. For egress policy this specifies the client-side TLS + parameters for the upstream connection originating from the L7 proxy + to the remote destination. For ingress policy this specifies the + client-side TLS parameters for the connection from the L7 proxy to + the local endpoint. + properties: + certificate: + description: |- + Certificate is the file name or k8s secret item name for the certificate + chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the + item must exist. 
+ type: string + privateKey: + description: |- + PrivateKey is the file name or k8s secret item name for the private key + matching the certificate chain. If omitted, 'tls.key' is assumed, if it + exists. If given, the item must exist. + type: string + secret: + description: |- + Secret is the secret that contains the certificates and private key for + the TLS context. + By default, Cilium will search in this secret for the following items: + - 'ca.crt' - Which represents the trusted CA to verify remote source. + - 'tls.crt' - Which represents the public key certificate. + - 'tls.key' - Which represents the private key matching the public key + certificate. + properties: + name: + description: Name is the name of the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + trustedCA: + description: |- + TrustedCA is the file name or k8s secret item name for the trusted CA. + If omitted, 'ca.crt' is assumed, if it exists. If given, the item must + exist. + type: string + required: + - secret + type: object + ports: + description: Ports is a list of L4 port/protocol + items: + description: PortProtocol specifies an L4 port with + an optional transport protocol + properties: + endPort: + description: EndPort can only be an L4 port number. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + description: |- + Port can be an L4 port number, or a name in the form of "http" + or "http-8080". + pattern: ^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$ + type: string + protocol: + description: |- + Protocol is the L4 protocol. If omitted or empty, any protocol + matches. Accepted values: "TCP", "UDP", "SCTP", "ANY" + + Matching on ICMP is not supported. 
+ + Named port specified for a container may narrow this down, but may not + contradict this. + enum: + - TCP + - UDP + - SCTP + - ANY + type: string + required: + - port + type: object + maxItems: 40 + type: array + rules: + description: |- + Rules is a list of additional port level rules which must be met in + order for the PortRule to allow the traffic. If omitted or empty, + no layer 7 rules are enforced. + oneOf: + - properties: + http: {} + required: + - http + - properties: + kafka: {} + required: + - kafka + - properties: + dns: {} + required: + - dns + - properties: + l7proto: {} + required: + - l7proto + properties: + dns: + description: DNS-specific rules. + items: + description: PortRuleDNS is a list of allowed + DNS lookups. + oneOf: + - properties: + matchName: {} + required: + - matchName + - properties: + matchPattern: {} + required: + - matchPattern + properties: + matchName: + description: |- + MatchName matches literal DNS names. A trailing "." is automatically added + when missing. + maxLength: 255 + pattern: ^([-a-zA-Z0-9_]+[.]?)+$ + type: string + matchPattern: + description: |- + MatchPattern allows using wildcards to match DNS names. All wildcards are + case insensitive. The wildcards are: + - "*" matches 0 or more DNS valid characters, and may occur anywhere in + the pattern. As a special case a "*" as the leftmost character, without a + following "." matches all subdomains as well as the name to the right. + A trailing "." is automatically added when missing. + + Examples: + `*.cilium.io` matches subomains of cilium at that level + www.cilium.io and blog.cilium.io match, cilium.io and google.com do not + `*cilium.io` matches cilium.io and all subdomains ends with "cilium.io" + except those containing "." 
separator, subcilium.io and sub-cilium.io match, + www.cilium.io and blog.cilium.io does not + sub*.cilium.io matches subdomains of cilium where the subdomain component + begins with "sub" + sub.cilium.io and subdomain.cilium.io match, www.cilium.io, + blog.cilium.io, cilium.io and google.com do not + maxLength: 255 + pattern: ^([-a-zA-Z0-9_*]+[.]?)+$ + type: string + type: object + type: array + http: + description: HTTP specific rules. + items: + description: |- + PortRuleHTTP is a list of HTTP protocol constraints. All fields are + optional, if all fields are empty or missing, the rule does not have any + effect. + + All fields of this type are extended POSIX regex as defined by IEEE Std + 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) + matched against the path of an incoming request. Currently it can contain + characters disallowed from the conventional "path" part of a URL as defined + by RFC 3986. + properties: + headerMatches: + description: |- + HeaderMatches is a list of HTTP headers which must be + present and match against the given values. Mismatch field can be used + to specify what to do when there is no match. + items: + description: |- + HeaderMatch extends the HeaderValue for matching requirement of a + named header field against an immediate string, a secret value, or + a regex. If none of the optional fields is present, then the + header value is not matched, only presence of the header is enough. + properties: + mismatch: + description: |- + Mismatch identifies what to do in case there is no match. The default is + to drop the request. Otherwise the overall rule is still considered as + matching, but the mismatches are logged in the access log. + enum: + - LOG + - ADD + - DELETE + - REPLACE + type: string + name: + description: Name identifies the header. + minLength: 1 + type: string + secret: + description: |- + Secret refers to a secret that contains the value to be matched against. 
+ The secret must only contain one entry. If the referred secret does not + exist, and there is no "Value" specified, the match will fail. + properties: + name: + description: Name is the name of + the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + value: + description: |- + Value matches the exact value of the header. Can be specified either + alone or together with "Secret"; will be used as the header value if the + secret can not be found in the latter case. + type: string + required: + - name + type: object + type: array + headers: + description: |- + Headers is a list of HTTP headers which must be present in the + request. If omitted or empty, requests are allowed regardless of + headers present. + items: + type: string + type: array + host: + description: |- + Host is an extended POSIX regex matched against the host header of a + request. Examples: + + - foo.bar.com will match the host fooXbar.com or foo-bar.com + - foo\.bar\.com will only match the host foo.bar.com + + If omitted or empty, the value of the host header is ignored. + format: idn-hostname + type: string + method: + description: |- + Method is an extended POSIX regex matched against the method of a + request, e.g. "GET", "POST", "PUT", "PATCH", "DELETE", ... + + If omitted or empty, all methods are allowed. + type: string + path: + description: |- + Path is an extended POSIX regex matched against the path of a + request. Currently it can contain characters disallowed from the + conventional "path" part of a URL as defined by RFC 3986. + + If omitted or empty, all paths are all allowed. + type: string + type: object + type: array + kafka: + description: Kafka-specific rules. + items: + description: |- + PortRule is a list of Kafka protocol constraints. 
All fields are + optional, if all fields are empty or missing, the rule will match all + Kafka messages. + properties: + apiKey: + description: |- + APIKey is a case-insensitive string matched against the key of a + request, e.g. "produce", "fetch", "createtopic", "deletetopic", et al + Reference: https://kafka.apache.org/protocol#protocol_api_keys + + If omitted or empty, and if Role is not specified, then all keys are allowed. + type: string + apiVersion: + description: |- + APIVersion is the version matched against the api version of the + Kafka message. If set, it has to be a string representing a positive + integer. + + If omitted or empty, all versions are allowed. + type: string + clientID: + description: |- + ClientID is the client identifier as provided in the request. + + From Kafka protocol documentation: + This is a user supplied identifier for the client application. The + user can use any identifier they like and it will be used when + logging errors, monitoring aggregates, etc. For example, one might + want to monitor not just the requests per second overall, but the + number coming from each client application (each of which could + reside on multiple servers). This id acts as a logical grouping + across all requests from a particular client. + + If omitted or empty, all client identifiers are allowed. + type: string + role: + description: |- + Role is a case-insensitive string and describes a group of API keys + necessary to perform certain higher-level Kafka operations such as "produce" + or "consume". A Role automatically expands into all APIKeys required + to perform the specified higher-level operation. + + The following values are supported: + - "produce": Allow producing to the topics specified in the rule + - "consume": Allow consuming from the topics specified in the rule + + This field is incompatible with the APIKey field, i.e APIKey and Role + cannot both be specified in the same rule. 
+ + If omitted or empty, and if APIKey is not specified, then all keys are + allowed. + enum: + - produce + - consume + type: string + topic: + description: |- + Topic is the topic name contained in the message. If a Kafka request + contains multiple topics, then all topics must be allowed or the + message will be rejected. + + This constraint is ignored if the matched request message type + doesn't contain any topic. Maximum size of Topic can be 249 + characters as per recent Kafka spec and allowed characters are + a-z, A-Z, 0-9, -, . and _. + + Older Kafka versions had longer topic lengths of 255, but in Kafka 0.10 + version the length was changed from 255 to 249. For compatibility + reasons we are using 255. + + If omitted or empty, all topics are allowed. + maxLength: 255 + type: string + type: object + type: array + l7: + description: Key-value pair rules. + items: + additionalProperties: + type: string + description: |- + PortRuleL7 is a list of key-value pairs interpreted by a L7 protocol as + protocol constraints. All fields are optional, if all fields are empty or + missing, the rule does not have any effect. + type: object + type: array + l7proto: + description: Name of the L7 protocol for which the + Key-value pair rules apply. + type: string + type: object + serverNames: + description: |- + ServerNames is a list of allowed TLS SNI values. If not empty, then + TLS must be present and one of the provided SNIs must be indicated in the + TLS handshake. + items: + type: string + type: array + terminatingTLS: + description: |- + TerminatingTLS is the TLS context for the connection terminated by + the L7 proxy. For egress policy this specifies the server-side TLS + parameters to be applied on the connections originated from the local + endpoint and terminated by the L7 proxy. For ingress policy this specifies + the server-side TLS parameters to be applied on the connections + originated from a remote source and terminated by the L7 proxy. 
+ properties: + certificate: + description: |- + Certificate is the file name or k8s secret item name for the certificate + chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the + item must exist. + type: string + privateKey: + description: |- + PrivateKey is the file name or k8s secret item name for the private key + matching the certificate chain. If omitted, 'tls.key' is assumed, if it + exists. If given, the item must exist. + type: string + secret: + description: |- + Secret is the secret that contains the certificates and private key for + the TLS context. + By default, Cilium will search in this secret for the following items: + - 'ca.crt' - Which represents the trusted CA to verify remote source. + - 'tls.crt' - Which represents the public key certificate. + - 'tls.key' - Which represents the private key matching the public key + certificate. + properties: + name: + description: Name is the name of the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + trustedCA: + description: |- + TrustedCA is the file name or k8s secret item name for the trusted CA. + If omitted, 'ca.crt' is assumed, if it exists. If given, the item must + exist. + type: string + required: + - secret + type: object + type: object + type: array + toRequires: + description: |- + ToRequires is a list of additional constraints which must be met + in order for the selected endpoints to be able to connect to other + endpoints. These additional constraints do no by itself grant access + privileges and must always be accompanied with at least one matching + ToEndpoints. + + Example: + Any Endpoint with the label "team=A" requires any endpoint to which it + communicates to also carry the label "team=A". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toServices: + description: |- + ToServices is a list of services to which the endpoint subject + to the rule is allowed to initiate connections. + Currently Cilium only supports toServices for K8s services. + items: + description: |- + Service selects policy targets that are bundled as part of a + logical load-balanced service. 
+ + Currently only Kubernetes-based Services are supported. + properties: + k8sService: + description: K8sService selects service by name and + namespace pair + properties: + namespace: + type: string + serviceName: + type: string + type: object + k8sServiceSelector: + description: K8sServiceSelector selects services by + k8s labels and namespace + properties: + namespace: + type: string + selector: + description: ServiceSelector is a label selector + for k8s services + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the + value from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - selector + type: object + type: object + type: array + type: object + type: array + egressDeny: + description: |- + EgressDeny is a list of EgressDenyRule which are enforced at egress. + Any rule inserted here will be denied regardless of the allowed egress + rules in the 'egress' field. + If omitted or empty, this rule does not apply at egress. + items: + description: |- + EgressDenyRule contains all rule types which can be applied at egress, i.e. + network traffic that originates inside the endpoint and exits the endpoint + selected by the endpointSelector. + + - All members of this structure are optional. If omitted or empty, the + member will have no effect on the rule. + + - If multiple members of the structure are specified, then all members + must match in order for the rule to take effect. The exception to this + rule is the ToRequires member; the effects of any Requires field in any + rule will apply to all other rules as well. + + - ToEndpoints, ToCIDR, ToCIDRSet, ToEntities, ToServices and ToGroups are + mutually exclusive. Only one of these members may be present within an + individual rule. + properties: + icmps: + description: |- + ICMPs is a list of ICMP rule identified by type number + which the endpoint subject to the rule is not allowed to connect to. + + Example: + Any endpoint with the label "app=httpd" is not allowed to initiate + type 8 ICMP connections. + items: + description: ICMPRule is a list of ICMP fields. + properties: + fields: + description: Fields is a list of ICMP fields. + items: + description: ICMPField is a ICMP field. + properties: + family: + default: IPv4 + description: |- + Family is a IP address version. + Currently, we support `IPv4` and `IPv6`. + `IPv4` is set as default. 
+ enum: + - IPv4 + - IPv6 + type: string + type: + anyOf: + - type: integer + - type: string + description: "Type is a ICMP-type.\nIt should + be an 8bit code (0-255), or it's CamelCase name + (for example, \"EchoReply\").\nAllowed ICMP + types are:\n Ipv4: EchoReply | DestinationUnreachable + | Redirect | Echo | EchoRequest |\n\t\t RouterAdvertisement + | RouterSelection | TimeExceeded | ParameterProblem + |\n\t\t\t Timestamp | TimestampReply | Photuris + | ExtendedEcho Request | ExtendedEcho Reply\n + \ Ipv6: DestinationUnreachable | PacketTooBig + | TimeExceeded | ParameterProblem |\n\t\t\t + EchoRequest | EchoReply | MulticastListenerQuery| + MulticastListenerReport |\n\t\t\t MulticastListenerDone + | RouterSolicitation | RouterAdvertisement | + NeighborSolicitation |\n\t\t\t NeighborAdvertisement + | RedirectMessage | RouterRenumbering | ICMPNodeInformationQuery + |\n\t\t\t ICMPNodeInformationResponse | InverseNeighborDiscoverySolicitation + | InverseNeighborDiscoveryAdvertisement |\n\t\t\t + HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply + | MobilePrefixSolicitation |\n\t\t\t MobilePrefixAdvertisement + | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix + |\n\t\t\t ExtendedEchoRequest | ExtendedEchoReply" + pattern: ^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho + 
Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$ + x-kubernetes-int-or-string: true + required: + - type + type: object + maxItems: 40 + type: array + type: object + type: array + toCIDR: + description: |- + ToCIDR is a list of IP blocks which the endpoint subject to the rule + is allowed to initiate connections. Only connections destined for + outside of the cluster and not targeting the host will be subject + to CIDR rules. This will match on the destination IP address of + outgoing connections. Adding a prefix into ToCIDR or into ToCIDRSet + with no ExcludeCIDRs is equivalent. Overlaps are allowed between + ToCIDR and ToCIDRSet. + + Example: + Any endpoint with the label "app=database-proxy" is allowed to + initiate connections to 10.2.3.0/24 + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + toCIDRSet: + description: |- + ToCIDRSet is a list of IP blocks which the endpoint subject to the rule + is allowed to initiate connections to in addition to connections + which are allowed via ToEndpoints, along with a list of subnets contained + within their corresponding IP block to which traffic should not be + allowed. This will match on the destination IP address of outgoing + connections. Adding a prefix into ToCIDR or into ToCIDRSet with no + ExcludeCIDRs is equivalent. Overlaps are allowed between ToCIDR and + ToCIDRSet. 
+ + Example: + Any endpoint with the label "app=database-proxy" is allowed to + initiate connections to 10.2.3.0/24 except from IPs in subnet 10.2.3.0/28. + items: + description: |- + CIDRRule is a rule that specifies a CIDR prefix to/from which outside + communication is allowed, along with an optional list of subnets within that + CIDR prefix to/from which outside communication is not allowed. + oneOf: + - properties: + cidr: {} + required: + - cidr + - properties: + cidrGroupRef: {} + required: + - cidrGroupRef + - properties: + cidrGroupSelector: {} + required: + - cidrGroupSelector + properties: + cidr: + description: CIDR is a CIDR prefix / IP Block. + format: cidr + type: string + cidrGroupRef: + description: |- + CIDRGroupRef is a reference to a CiliumCIDRGroup object. + A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to + the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + connections from. + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + cidrGroupSelector: + description: |- + CIDRGroupSelector selects CiliumCIDRGroups by their labels, + rather than by name. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + except: + description: |- + ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule + is not allowed to initiate connections to. These CIDR prefixes should be + contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not + supported yet. + These exceptions are only applied to the Cidr in this CIDRRule, and do not + apply to any other CIDR prefixes in any other CIDRRules. + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + type: object + type: array + toEndpoints: + description: |- + ToEndpoints is a list of endpoints identified by an EndpointSelector to + which the endpoints subject to the rule are allowed to communicate. + + Example: + Any endpoint with the label "role=frontend" can communicate with any + endpoint carrying the label "role=backend". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toEntities: + description: |- + ToEntities is a list of special entities to which the endpoint subject + to the rule is allowed to initiate connections. Supported entities are + `world`, `cluster`,`host`,`remote-node`,`kube-apiserver`, `init`, + `health`,`unmanaged` and `all`. + items: + description: |- + Entity specifies the class of receiver/sender endpoints that do not have + individual identities. Entities are used to describe "outside of cluster", + "host", etc. 
+ enum:
+ - all
+ - world
+ - cluster
+ - host
+ - init
+ - ingress
+ - unmanaged
+ - remote-node
+ - health
+ - none
+ - kube-apiserver
+ type: string
+ type: array
+ toGroups:
+ description: |-
+ ToGroups is a directive that allows the integration with multiple outside
+ providers. Currently, only AWS is supported, and the rule can select by
+ multiple sub directives:
+
+ Example:
+ toGroups:
+ - aws:
+ securityGroupsIds:
+ - 'sg-XXXXXXXXXXXXX'
+ items:
+ description: |-
+ Groups structure to store all kinds of new integrations that needs a new
+ derivative policy.
+ properties:
+ aws:
+ description: AWSGroup is a structure that can be used
+ for whitelisting information from AWS integration
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ region:
+ type: string
+ securityGroupsIds:
+ items:
+ type: string
+ type: array
+ securityGroupsNames:
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ type: array
+ toNodes:
+ description: |-
+ ToNodes is a list of nodes identified by an
+ EndpointSelector to which endpoints subject to the rule are allowed to communicate.
+ items:
+ description: EndpointSelector is a wrapper for k8s LabelSelector.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ enum:
+ - In
+ - NotIn
+ - Exists
+ - DoesNotExist
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toPorts: + description: |- + ToPorts is a list of destination ports identified by port number and + protocol which the endpoint subject to the rule is not allowed to connect + to. + + Example: + Any endpoint with the label "role=frontend" is not allowed to initiate + connections to destination port 8080/tcp + items: + description: |- + PortDenyRule is a list of ports/protocol that should be used for deny + policies. This structure lacks the L7Rules since it's not supported in deny + policies. + properties: + ports: + description: Ports is a list of L4 port/protocol + items: + description: PortProtocol specifies an L4 port with + an optional transport protocol + properties: + endPort: + description: EndPort can only be an L4 port number. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + description: |- + Port can be an L4 port number, or a name in the form of "http" + or "http-8080". 
+ pattern: ^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$
+ type: string
+ protocol:
+ description: |-
+ Protocol is the L4 protocol. If omitted or empty, any protocol
+ matches. Accepted values: "TCP", "UDP", "SCTP", "ANY"
+
+ Matching on ICMP is not supported.
+
+ Named port specified for a container may narrow this down, but may not
+ contradict this.
+ enum:
+ - TCP
+ - UDP
+ - SCTP
+ - ANY
+ type: string
+ required:
+ - port
+ type: object
+ type: array
+ type: object
+ type: array
+ toRequires:
+ description: |-
+ ToRequires is a list of additional constraints which must be met
+ in order for the selected endpoints to be able to connect to other
+ endpoints. These additional constraints do not by themselves grant access
+ privileges and must always be accompanied with at least one matching
+ ToEndpoints.
+
+ Example:
+ Any Endpoint with the label "team=A" requires any endpoint to which it
+ communicates to also carry the label "team=A".
+ items:
+ description: EndpointSelector is a wrapper for k8s LabelSelector.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ enum:
+ - In
+ - NotIn
+ - Exists
+ - DoesNotExist
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + toServices: + description: |- + ToServices is a list of services to which the endpoint subject + to the rule is allowed to initiate connections. + Currently Cilium only supports toServices for K8s services. + items: + description: |- + Service selects policy targets that are bundled as part of a + logical load-balanced service. + + Currently only Kubernetes-based Services are supported. + properties: + k8sService: + description: K8sService selects service by name and + namespace pair + properties: + namespace: + type: string + serviceName: + type: string + type: object + k8sServiceSelector: + description: K8sServiceSelector selects services by + k8s labels and namespace + properties: + namespace: + type: string + selector: + description: ServiceSelector is a label selector + for k8s services + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the + value from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - selector + type: object + type: object + type: array + type: object + type: array + enableDefaultDeny: + description: |- + EnableDefaultDeny determines whether this policy configures the + subject endpoint(s) to have a default deny mode. If enabled, + this causes all traffic not explicitly allowed by a network policy + to be dropped. + + If not specified, the default is true for each traffic direction + that has rules, and false otherwise. For example, if a policy + only has Ingress or IngressDeny rules, then the default for + ingress is true and egress is false. + + If multiple policies apply to an endpoint, that endpoint's default deny + will be enabled if any policy requests it. 
+ + This is useful for creating broad-based network policies that will not + cause endpoints to enter default-deny mode. + properties: + egress: + description: |- + Whether or not the endpoint should have a default-deny rule applied + to egress traffic. + type: boolean + ingress: + description: |- + Whether or not the endpoint should have a default-deny rule applied + to ingress traffic. + type: boolean + type: object + endpointSelector: + description: |- + EndpointSelector selects all endpoints which should be subject to + this rule. EndpointSelector and NodeSelector cannot be both empty and + are mutually exclusive. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from the + MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + ingress: + description: |- + Ingress is a list of IngressRule which are enforced at ingress. + If omitted or empty, this rule does not apply at ingress. + items: + description: |- + IngressRule contains all rule types which can be applied at ingress, + i.e. network traffic that originates outside of the endpoint and + is entering the endpoint selected by the endpointSelector. + + - All members of this structure are optional. If omitted or empty, the + member will have no effect on the rule. + + - If multiple members are set, all of them need to match in order for + the rule to take effect. The exception to this rule is FromRequires field; + the effects of any Requires field in any rule will apply to all other + rules as well. + + - FromEndpoints, FromCIDR, FromCIDRSet and FromEntities are mutually + exclusive. Only one of these members may be present within an individual + rule. + properties: + authentication: + description: Authentication is the required authentication + type for the allowed traffic, if any. + properties: + mode: + description: Mode is the required authentication mode + for the allowed traffic, if any. + enum: + - disabled + - required + - test-always-fail + type: string + required: + - mode + type: object + fromCIDR: + description: |- + FromCIDR is a list of IP blocks which the endpoint subject to the + rule is allowed to receive connections from. Only connections which + do *not* originate from the cluster or from the local host are subject + to CIDR rules. In order to allow in-cluster connectivity, use the + FromEndpoints field. This will match on the source IP address of + incoming connections. 
Adding a prefix into FromCIDR or into + FromCIDRSet with no ExcludeCIDRs is equivalent. Overlaps are + allowed between FromCIDR and FromCIDRSet. + + Example: + Any endpoint with the label "app=my-legacy-pet" is allowed to receive + connections from 10.3.9.1 + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + fromCIDRSet: + description: |- + FromCIDRSet is a list of IP blocks which the endpoint subject to the + rule is allowed to receive connections from in addition to FromEndpoints, + along with a list of subnets contained within their corresponding IP block + from which traffic should not be allowed. + This will match on the source IP address of incoming connections. Adding + a prefix into FromCIDR or into FromCIDRSet with no ExcludeCIDRs is + equivalent. Overlaps are allowed between FromCIDR and FromCIDRSet. + + Example: + Any endpoint with the label "app=my-legacy-pet" is allowed to receive + connections from 10.0.0.0/8 except from IPs in subnet 10.96.0.0/12. + items: + description: |- + CIDRRule is a rule that specifies a CIDR prefix to/from which outside + communication is allowed, along with an optional list of subnets within that + CIDR prefix to/from which outside communication is not allowed. + oneOf: + - properties: + cidr: {} + required: + - cidr + - properties: + cidrGroupRef: {} + required: + - cidrGroupRef + - properties: + cidrGroupSelector: {} + required: + - cidrGroupSelector + properties: + cidr: + description: CIDR is a CIDR prefix / IP Block. + format: cidr + type: string + cidrGroupRef: + description: |- + CIDRGroupRef is a reference to a CiliumCIDRGroup object. + A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to + the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + connections from. 
+ maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + cidrGroupSelector: + description: |- + CIDRGroupSelector selects CiliumCIDRGroups by their labels, + rather than by name. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + except: + description: |- + ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule + is not allowed to initiate connections to. 
These CIDR prefixes should be + contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not + supported yet. + These exceptions are only applied to the Cidr in this CIDRRule, and do not + apply to any other CIDR prefixes in any other CIDRRules. + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + type: object + type: array + fromEndpoints: + description: |- + FromEndpoints is a list of endpoints identified by an + EndpointSelector which are allowed to communicate with the endpoint + subject to the rule. + + Example: + Any endpoint with the label "role=backend" can be consumed by any + endpoint carrying the label "role=frontend". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. 
+ maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + fromEntities: + description: |- + FromEntities is a list of special entities which the endpoint subject + to the rule is allowed to receive connections from. Supported entities are + `world`, `cluster` and `host` + items: + description: |- + Entity specifies the class of receiver/sender endpoints that do not have + individual identities. Entities are used to describe "outside of cluster", + "host", etc. + enum: + - all + - world + - cluster + - host + - init + - ingress + - unmanaged + - remote-node + - health + - none + - kube-apiserver + type: string + type: array + fromGroups: + description: |- + FromGroups is a directive that allows the integration with multiple outside + providers. Currently, only AWS is supported, and the rule can select by + multiple sub directives: + + Example: + FromGroups: + - aws: + securityGroupsIds: + - 'sg-XXXXXXXXXXXXX' + items: + description: |- + Groups structure to store all kinds of new integrations that needs a new + derivative policy. 
+ properties:
+ aws:
+ description: AWSGroup is a structure that can be used
+ for whitelisting information from AWS integration
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ region:
+ type: string
+ securityGroupsIds:
+ items:
+ type: string
+ type: array
+ securityGroupsNames:
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ type: array
+ fromNodes:
+ description: |-
+ FromNodes is a list of nodes identified by an
+ EndpointSelector which are allowed to communicate with the endpoint
+ subject to the rule.
+ items:
+ description: EndpointSelector is a wrapper for k8s LabelSelector.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ enum:
+ - In
+ - NotIn
+ - Exists
+ - DoesNotExist
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ description: MatchLabelsValue represents the value
+ from the MatchLabels {key,value} pair.
+ maxLength: 63
+ pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ fromRequires:
+ description: |-
+ FromRequires is a list of additional constraints which must be met
+ in order for the selected endpoints to be reachable. These
+ additional constraints do not by themselves grant access privileges and
+ must always be accompanied with at least one matching FromEndpoints.
+
+ Example:
+ Any Endpoint with the label "team=A" requires consuming endpoint
+ to also carry the label "team=A".
+ items:
+ description: EndpointSelector is a wrapper for k8s LabelSelector.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ enum:
+ - In
+ - NotIn
+ - Exists
+ - DoesNotExist
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ description: MatchLabelsValue represents the value
+ from the MatchLabels {key,value} pair. 
+ maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + icmps: + description: |- + ICMPs is a list of ICMP rule identified by type number + which the endpoint subject to the rule is allowed to + receive connections on. + + Example: + Any endpoint with the label "app=httpd" can only accept incoming + type 8 ICMP connections. + items: + description: ICMPRule is a list of ICMP fields. + properties: + fields: + description: Fields is a list of ICMP fields. + items: + description: ICMPField is a ICMP field. + properties: + family: + default: IPv4 + description: |- + Family is a IP address version. + Currently, we support `IPv4` and `IPv6`. + `IPv4` is set as default. 
+ enum: + - IPv4 + - IPv6 + type: string + type: + anyOf: + - type: integer + - type: string + description: "Type is a ICMP-type.\nIt should + be an 8bit code (0-255), or it's CamelCase name + (for example, \"EchoReply\").\nAllowed ICMP + types are:\n Ipv4: EchoReply | DestinationUnreachable + | Redirect | Echo | EchoRequest |\n\t\t RouterAdvertisement + | RouterSelection | TimeExceeded | ParameterProblem + |\n\t\t\t Timestamp | TimestampReply | Photuris + | ExtendedEcho Request | ExtendedEcho Reply\n + \ Ipv6: DestinationUnreachable | PacketTooBig + | TimeExceeded | ParameterProblem |\n\t\t\t + EchoRequest | EchoReply | MulticastListenerQuery| + MulticastListenerReport |\n\t\t\t MulticastListenerDone + | RouterSolicitation | RouterAdvertisement | + NeighborSolicitation |\n\t\t\t NeighborAdvertisement + | RedirectMessage | RouterRenumbering | ICMPNodeInformationQuery + |\n\t\t\t ICMPNodeInformationResponse | InverseNeighborDiscoverySolicitation + | InverseNeighborDiscoveryAdvertisement |\n\t\t\t + HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply + | MobilePrefixSolicitation |\n\t\t\t MobilePrefixAdvertisement + | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix + |\n\t\t\t ExtendedEchoRequest | ExtendedEchoReply" + pattern: ^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho + 
Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$ + x-kubernetes-int-or-string: true + required: + - type + type: object + maxItems: 40 + type: array + type: object + type: array + toPorts: + description: |- + ToPorts is a list of destination ports identified by port number and + protocol which the endpoint subject to the rule is allowed to + receive connections on. + + Example: + Any endpoint with the label "app=httpd" can only accept incoming + connections on port 80/tcp. + items: + description: |- + PortRule is a list of ports/protocol combinations with optional Layer 7 + rules which must be met. + properties: + listener: + description: |- + listener specifies the name of a custom Envoy listener to which this traffic should be + redirected to. + properties: + envoyConfig: + description: |- + EnvoyConfig is a reference to the CEC or CCEC resource in which + the listener is defined. + properties: + kind: + description: |- + Kind is the resource type being referred to. Defaults to CiliumEnvoyConfig or + CiliumClusterwideEnvoyConfig for CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy, + respectively. The only case this is currently explicitly needed is when referring to a + CiliumClusterwideEnvoyConfig from CiliumNetworkPolicy, as using a namespaced listener + from a cluster scoped policy is not allowed. 
+ enum: + - CiliumEnvoyConfig + - CiliumClusterwideEnvoyConfig + type: string + name: + description: |- + Name is the resource name of the CiliumEnvoyConfig or CiliumClusterwideEnvoyConfig where + the listener is defined in. + minLength: 1 + type: string + required: + - name + type: object + name: + description: Name is the name of the listener. + minLength: 1 + type: string + priority: + description: |- + Priority for this Listener that is used when multiple rules would apply different + listeners to a policy map entry. Behavior of this is implementation dependent. + maximum: 100 + minimum: 1 + type: integer + required: + - envoyConfig + - name + type: object + originatingTLS: + description: |- + OriginatingTLS is the TLS context for the connections originated by + the L7 proxy. For egress policy this specifies the client-side TLS + parameters for the upstream connection originating from the L7 proxy + to the remote destination. For ingress policy this specifies the + client-side TLS parameters for the connection from the L7 proxy to + the local endpoint. + properties: + certificate: + description: |- + Certificate is the file name or k8s secret item name for the certificate + chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the + item must exist. + type: string + privateKey: + description: |- + PrivateKey is the file name or k8s secret item name for the private key + matching the certificate chain. If omitted, 'tls.key' is assumed, if it + exists. If given, the item must exist. + type: string + secret: + description: |- + Secret is the secret that contains the certificates and private key for + the TLS context. + By default, Cilium will search in this secret for the following items: + - 'ca.crt' - Which represents the trusted CA to verify remote source. + - 'tls.crt' - Which represents the public key certificate. + - 'tls.key' - Which represents the private key matching the public key + certificate. 
+ properties: + name: + description: Name is the name of the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + trustedCA: + description: |- + TrustedCA is the file name or k8s secret item name for the trusted CA. + If omitted, 'ca.crt' is assumed, if it exists. If given, the item must + exist. + type: string + required: + - secret + type: object + ports: + description: Ports is a list of L4 port/protocol + items: + description: PortProtocol specifies an L4 port with + an optional transport protocol + properties: + endPort: + description: EndPort can only be an L4 port number. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + description: |- + Port can be an L4 port number, or a name in the form of "http" + or "http-8080". + pattern: ^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$ + type: string + protocol: + description: |- + Protocol is the L4 protocol. If omitted or empty, any protocol + matches. Accepted values: "TCP", "UDP", "SCTP", "ANY" + + Matching on ICMP is not supported. + + Named port specified for a container may narrow this down, but may not + contradict this. + enum: + - TCP + - UDP + - SCTP + - ANY + type: string + required: + - port + type: object + maxItems: 40 + type: array + rules: + description: |- + Rules is a list of additional port level rules which must be met in + order for the PortRule to allow the traffic. If omitted or empty, + no layer 7 rules are enforced. + oneOf: + - properties: + http: {} + required: + - http + - properties: + kafka: {} + required: + - kafka + - properties: + dns: {} + required: + - dns + - properties: + l7proto: {} + required: + - l7proto + properties: + dns: + description: DNS-specific rules. 
+ items: + description: PortRuleDNS is a list of allowed + DNS lookups. + oneOf: + - properties: + matchName: {} + required: + - matchName + - properties: + matchPattern: {} + required: + - matchPattern + properties: + matchName: + description: |- + MatchName matches literal DNS names. A trailing "." is automatically added + when missing. + maxLength: 255 + pattern: ^([-a-zA-Z0-9_]+[.]?)+$ + type: string + matchPattern: + description: |- + MatchPattern allows using wildcards to match DNS names. All wildcards are + case insensitive. The wildcards are: + - "*" matches 0 or more DNS valid characters, and may occur anywhere in + the pattern. As a special case a "*" as the leftmost character, without a + following "." matches all subdomains as well as the name to the right. + A trailing "." is automatically added when missing. + + Examples: + `*.cilium.io` matches subomains of cilium at that level + www.cilium.io and blog.cilium.io match, cilium.io and google.com do not + `*cilium.io` matches cilium.io and all subdomains ends with "cilium.io" + except those containing "." separator, subcilium.io and sub-cilium.io match, + www.cilium.io and blog.cilium.io does not + sub*.cilium.io matches subdomains of cilium where the subdomain component + begins with "sub" + sub.cilium.io and subdomain.cilium.io match, www.cilium.io, + blog.cilium.io, cilium.io and google.com do not + maxLength: 255 + pattern: ^([-a-zA-Z0-9_*]+[.]?)+$ + type: string + type: object + type: array + http: + description: HTTP specific rules. + items: + description: |- + PortRuleHTTP is a list of HTTP protocol constraints. All fields are + optional, if all fields are empty or missing, the rule does not have any + effect. + + All fields of this type are extended POSIX regex as defined by IEEE Std + 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) + matched against the path of an incoming request. 
Currently it can contain + characters disallowed from the conventional "path" part of a URL as defined + by RFC 3986. + properties: + headerMatches: + description: |- + HeaderMatches is a list of HTTP headers which must be + present and match against the given values. Mismatch field can be used + to specify what to do when there is no match. + items: + description: |- + HeaderMatch extends the HeaderValue for matching requirement of a + named header field against an immediate string, a secret value, or + a regex. If none of the optional fields is present, then the + header value is not matched, only presence of the header is enough. + properties: + mismatch: + description: |- + Mismatch identifies what to do in case there is no match. The default is + to drop the request. Otherwise the overall rule is still considered as + matching, but the mismatches are logged in the access log. + enum: + - LOG + - ADD + - DELETE + - REPLACE + type: string + name: + description: Name identifies the header. + minLength: 1 + type: string + secret: + description: |- + Secret refers to a secret that contains the value to be matched against. + The secret must only contain one entry. If the referred secret does not + exist, and there is no "Value" specified, the match will fail. + properties: + name: + description: Name is the name of + the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + value: + description: |- + Value matches the exact value of the header. Can be specified either + alone or together with "Secret"; will be used as the header value if the + secret can not be found in the latter case. + type: string + required: + - name + type: object + type: array + headers: + description: |- + Headers is a list of HTTP headers which must be present in the + request. 
If omitted or empty, requests are allowed regardless of + headers present. + items: + type: string + type: array + host: + description: |- + Host is an extended POSIX regex matched against the host header of a + request. Examples: + + - foo.bar.com will match the host fooXbar.com or foo-bar.com + - foo\.bar\.com will only match the host foo.bar.com + + If omitted or empty, the value of the host header is ignored. + format: idn-hostname + type: string + method: + description: |- + Method is an extended POSIX regex matched against the method of a + request, e.g. "GET", "POST", "PUT", "PATCH", "DELETE", ... + + If omitted or empty, all methods are allowed. + type: string + path: + description: |- + Path is an extended POSIX regex matched against the path of a + request. Currently it can contain characters disallowed from the + conventional "path" part of a URL as defined by RFC 3986. + + If omitted or empty, all paths are all allowed. + type: string + type: object + type: array + kafka: + description: Kafka-specific rules. + items: + description: |- + PortRule is a list of Kafka protocol constraints. All fields are + optional, if all fields are empty or missing, the rule will match all + Kafka messages. + properties: + apiKey: + description: |- + APIKey is a case-insensitive string matched against the key of a + request, e.g. "produce", "fetch", "createtopic", "deletetopic", et al + Reference: https://kafka.apache.org/protocol#protocol_api_keys + + If omitted or empty, and if Role is not specified, then all keys are allowed. + type: string + apiVersion: + description: |- + APIVersion is the version matched against the api version of the + Kafka message. If set, it has to be a string representing a positive + integer. + + If omitted or empty, all versions are allowed. + type: string + clientID: + description: |- + ClientID is the client identifier as provided in the request. 
+ + From Kafka protocol documentation: + This is a user supplied identifier for the client application. The + user can use any identifier they like and it will be used when + logging errors, monitoring aggregates, etc. For example, one might + want to monitor not just the requests per second overall, but the + number coming from each client application (each of which could + reside on multiple servers). This id acts as a logical grouping + across all requests from a particular client. + + If omitted or empty, all client identifiers are allowed. + type: string + role: + description: |- + Role is a case-insensitive string and describes a group of API keys + necessary to perform certain higher-level Kafka operations such as "produce" + or "consume". A Role automatically expands into all APIKeys required + to perform the specified higher-level operation. + + The following values are supported: + - "produce": Allow producing to the topics specified in the rule + - "consume": Allow consuming from the topics specified in the rule + + This field is incompatible with the APIKey field, i.e APIKey and Role + cannot both be specified in the same rule. + + If omitted or empty, and if APIKey is not specified, then all keys are + allowed. + enum: + - produce + - consume + type: string + topic: + description: |- + Topic is the topic name contained in the message. If a Kafka request + contains multiple topics, then all topics must be allowed or the + message will be rejected. + + This constraint is ignored if the matched request message type + doesn't contain any topic. Maximum size of Topic can be 249 + characters as per recent Kafka spec and allowed characters are + a-z, A-Z, 0-9, -, . and _. + + Older Kafka versions had longer topic lengths of 255, but in Kafka 0.10 + version the length was changed from 255 to 249. For compatibility + reasons we are using 255. + + If omitted or empty, all topics are allowed. 
+ maxLength: 255 + type: string + type: object + type: array + l7: + description: Key-value pair rules. + items: + additionalProperties: + type: string + description: |- + PortRuleL7 is a list of key-value pairs interpreted by a L7 protocol as + protocol constraints. All fields are optional, if all fields are empty or + missing, the rule does not have any effect. + type: object + type: array + l7proto: + description: Name of the L7 protocol for which the + Key-value pair rules apply. + type: string + type: object + serverNames: + description: |- + ServerNames is a list of allowed TLS SNI values. If not empty, then + TLS must be present and one of the provided SNIs must be indicated in the + TLS handshake. + items: + type: string + type: array + terminatingTLS: + description: |- + TerminatingTLS is the TLS context for the connection terminated by + the L7 proxy. For egress policy this specifies the server-side TLS + parameters to be applied on the connections originated from the local + endpoint and terminated by the L7 proxy. For ingress policy this specifies + the server-side TLS parameters to be applied on the connections + originated from a remote source and terminated by the L7 proxy. + properties: + certificate: + description: |- + Certificate is the file name or k8s secret item name for the certificate + chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the + item must exist. + type: string + privateKey: + description: |- + PrivateKey is the file name or k8s secret item name for the private key + matching the certificate chain. If omitted, 'tls.key' is assumed, if it + exists. If given, the item must exist. + type: string + secret: + description: |- + Secret is the secret that contains the certificates and private key for + the TLS context. + By default, Cilium will search in this secret for the following items: + - 'ca.crt' - Which represents the trusted CA to verify remote source. + - 'tls.crt' - Which represents the public key certificate. 
+ - 'tls.key' - Which represents the private key matching the public key + certificate. + properties: + name: + description: Name is the name of the secret. + type: string + namespace: + description: |- + Namespace is the namespace in which the secret exists. Context of use + determines the default value if left out (e.g., "default"). + type: string + required: + - name + type: object + trustedCA: + description: |- + TrustedCA is the file name or k8s secret item name for the trusted CA. + If omitted, 'ca.crt' is assumed, if it exists. If given, the item must + exist. + type: string + required: + - secret + type: object + type: object + type: array + type: object + type: array + ingressDeny: + description: |- + IngressDeny is a list of IngressDenyRule which are enforced at ingress. + Any rule inserted here will be denied regardless of the allowed ingress + rules in the 'ingress' field. + If omitted or empty, this rule does not apply at ingress. + items: + description: |- + IngressDenyRule contains all rule types which can be applied at ingress, + i.e. network traffic that originates outside of the endpoint and + is entering the endpoint selected by the endpointSelector. + + - All members of this structure are optional. If omitted or empty, the + member will have no effect on the rule. + + - If multiple members are set, all of them need to match in order for + the rule to take effect. The exception to this rule is FromRequires field; + the effects of any Requires field in any rule will apply to all other + rules as well. + + - FromEndpoints, FromCIDR, FromCIDRSet, FromGroups and FromEntities are mutually + exclusive. Only one of these members may be present within an individual + rule. + properties: + fromCIDR: + description: |- + FromCIDR is a list of IP blocks which the endpoint subject to the + rule is allowed to receive connections from. Only connections which + do *not* originate from the cluster or from the local host are subject + to CIDR rules. 
In order to allow in-cluster connectivity, use the + FromEndpoints field. This will match on the source IP address of + incoming connections. Adding a prefix into FromCIDR or into + FromCIDRSet with no ExcludeCIDRs is equivalent. Overlaps are + allowed between FromCIDR and FromCIDRSet. + + Example: + Any endpoint with the label "app=my-legacy-pet" is allowed to receive + connections from 10.3.9.1 + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + fromCIDRSet: + description: |- + FromCIDRSet is a list of IP blocks which the endpoint subject to the + rule is allowed to receive connections from in addition to FromEndpoints, + along with a list of subnets contained within their corresponding IP block + from which traffic should not be allowed. + This will match on the source IP address of incoming connections. Adding + a prefix into FromCIDR or into FromCIDRSet with no ExcludeCIDRs is + equivalent. Overlaps are allowed between FromCIDR and FromCIDRSet. + + Example: + Any endpoint with the label "app=my-legacy-pet" is allowed to receive + connections from 10.0.0.0/8 except from IPs in subnet 10.96.0.0/12. + items: + description: |- + CIDRRule is a rule that specifies a CIDR prefix to/from which outside + communication is allowed, along with an optional list of subnets within that + CIDR prefix to/from which outside communication is not allowed. + oneOf: + - properties: + cidr: {} + required: + - cidr + - properties: + cidrGroupRef: {} + required: + - cidrGroupRef + - properties: + cidrGroupSelector: {} + required: + - cidrGroupSelector + properties: + cidr: + description: CIDR is a CIDR prefix / IP Block. + format: cidr + type: string + cidrGroupRef: + description: |- + CIDRGroupRef is a reference to a CiliumCIDRGroup object. 
+ A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to + the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + connections from. + maxLength: 253 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + cidrGroupSelector: + description: |- + CIDRGroupSelector selects CiliumCIDRGroups by their labels, + rather than by name. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + except: + description: |- + ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule + is not allowed to initiate connections to. These CIDR prefixes should be + contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not + supported yet. + These exceptions are only applied to the Cidr in this CIDRRule, and do not + apply to any other CIDR prefixes in any other CIDRRules. + items: + description: |- + CIDR specifies a block of IP addresses. + Example: 192.0.2.1/32 + format: cidr + type: string + type: array + type: object + type: array + fromEndpoints: + description: |- + FromEndpoints is a list of endpoints identified by an + EndpointSelector which are allowed to communicate with the endpoint + subject to the rule. + + Example: + Any endpoint with the label "role=backend" can be consumed by any + endpoint carrying the label "role=frontend". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + fromEntities: + description: |- + FromEntities is a list of special entities which the endpoint subject + to the rule is allowed to receive connections from. Supported entities are + `world`, `cluster` and `host` + items: + description: |- + Entity specifies the class of receiver/sender endpoints that do not have + individual identities. Entities are used to describe "outside of cluster", + "host", etc. + enum: + - all + - world + - cluster + - host + - init + - ingress + - unmanaged + - remote-node + - health + - none + - kube-apiserver + type: string + type: array + fromGroups: + description: |- + FromGroups is a directive that allows the integration with multiple outside + providers. Currently, only AWS is supported, and the rule can select by + multiple sub directives: + + Example: + FromGroups: + - aws: + securityGroupsIds: + - 'sg-XXXXXXXXXXXXX' + items: + description: |- + Groups structure to store all kinds of new integrations that needs a new + derivative policy. 
+ properties: + aws: + description: AWSGroup is an structure that can be used + to whitelisting information from AWS integration + properties: + labels: + additionalProperties: + type: string + type: object + region: + type: string + securityGroupsIds: + items: + type: string + type: array + securityGroupsNames: + items: + type: string + type: array + type: object + type: object + type: array + fromNodes: + description: |- + FromNodes is a list of nodes identified by an + EndpointSelector which are allowed to communicate with the endpoint + subject to the rule. + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + fromRequires: + description: |- + FromRequires is a list of additional constraints which must be met + in order for the selected endpoints to be reachable. These + additional constraints do no by itself grant access privileges and + must always be accompanied with at least one matching FromEndpoints. + + Example: + Any Endpoint with the label "team=A" requires consuming endpoint + to also carry the label "team=A". + items: + description: EndpointSelector is a wrapper for k8s LabelSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value + from the MatchLabels {key,value} pair. 
+ maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: array + icmps: + description: |- + ICMPs is a list of ICMP rule identified by type number + which the endpoint subject to the rule is not allowed to + receive connections on. + + Example: + Any endpoint with the label "app=httpd" can not accept incoming + type 8 ICMP connections. + items: + description: ICMPRule is a list of ICMP fields. + properties: + fields: + description: Fields is a list of ICMP fields. + items: + description: ICMPField is a ICMP field. + properties: + family: + default: IPv4 + description: |- + Family is a IP address version. + Currently, we support `IPv4` and `IPv6`. + `IPv4` is set as default. 
+ enum: + - IPv4 + - IPv6 + type: string + type: + anyOf: + - type: integer + - type: string + description: "Type is a ICMP-type.\nIt should + be an 8bit code (0-255), or it's CamelCase name + (for example, \"EchoReply\").\nAllowed ICMP + types are:\n Ipv4: EchoReply | DestinationUnreachable + | Redirect | Echo | EchoRequest |\n\t\t RouterAdvertisement + | RouterSelection | TimeExceeded | ParameterProblem + |\n\t\t\t Timestamp | TimestampReply | Photuris + | ExtendedEcho Request | ExtendedEcho Reply\n + \ Ipv6: DestinationUnreachable | PacketTooBig + | TimeExceeded | ParameterProblem |\n\t\t\t + EchoRequest | EchoReply | MulticastListenerQuery| + MulticastListenerReport |\n\t\t\t MulticastListenerDone + | RouterSolicitation | RouterAdvertisement | + NeighborSolicitation |\n\t\t\t NeighborAdvertisement + | RedirectMessage | RouterRenumbering | ICMPNodeInformationQuery + |\n\t\t\t ICMPNodeInformationResponse | InverseNeighborDiscoverySolicitation + | InverseNeighborDiscoveryAdvertisement |\n\t\t\t + HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply + | MobilePrefixSolicitation |\n\t\t\t MobilePrefixAdvertisement + | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix + |\n\t\t\t ExtendedEchoRequest | ExtendedEchoReply" + pattern: ^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho + 
Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$ + x-kubernetes-int-or-string: true + required: + - type + type: object + maxItems: 40 + type: array + type: object + type: array + toPorts: + description: |- + ToPorts is a list of destination ports identified by port number and + protocol which the endpoint subject to the rule is not allowed to + receive connections on. + + Example: + Any endpoint with the label "app=httpd" can not accept incoming + connections on port 80/tcp. + items: + description: |- + PortDenyRule is a list of ports/protocol that should be used for deny + policies. This structure lacks the L7Rules since it's not supported in deny + policies. + properties: + ports: + description: Ports is a list of L4 port/protocol + items: + description: PortProtocol specifies an L4 port with + an optional transport protocol + properties: + endPort: + description: EndPort can only be an L4 port number. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + description: |- + Port can be an L4 port number, or a name in the form of "http" + or "http-8080". + pattern: ^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$ + type: string + protocol: + description: |- + Protocol is the L4 protocol. If omitted or empty, any protocol + matches. Accepted values: "TCP", "UDP", "SCTP", "ANY" + + Matching on ICMP is not supported. 
+ + Named port specified for a container may narrow this down, but may not + contradict this. + enum: + - TCP + - UDP + - SCTP + - ANY + type: string + required: + - port + type: object + type: array + type: object + type: array + type: object + type: array + labels: + description: |- + Labels is a list of optional strings which can be used to + re-identify the rule or to store metadata. It is possible to lookup + or delete strings based on labels. Labels are not required to be + unique, multiple rules can have overlapping or identical labels. + items: + description: Label is the Cilium's representation of a container + label. + properties: + key: + type: string + source: + description: 'Source can be one of the above values (e.g.: + LabelSourceContainer).' + type: string + value: + type: string + required: + - key + type: object + type: array + nodeSelector: + description: |- + NodeSelector selects all nodes which should be subject to this rule. + EndpointSelector and NodeSelector cannot be both empty and are mutually + exclusive. Can only be used in CiliumClusterwideNetworkPolicies. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + description: MatchLabelsValue represents the value from the + MatchLabels {key,value} pair. + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + type: array + status: + description: |- + Status is the status of the Cilium policy rule. + + The reason this field exists in this structure is due a bug in the k8s + code-generator that doesn't create a `UpdateStatus` method because the + field does not exist in the structure. + properties: + conditions: + items: + properties: + lastTransitionTime: + description: The last time the condition transitioned from one + status to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: The status of the condition, one of True, False, + or Unknown + type: string + type: + description: The type of the policy condition + type: string + required: + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + derivativePolicies: + additionalProperties: + description: |- + CiliumNetworkPolicyNodeStatus is the status of a Cilium policy rule for a + specific node. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations corresponds to the Annotations in the ObjectMeta of the CNP + that have been realized on the node for CNP. That is, if a CNP has been + imported and has been assigned annotation X=Y by the user, + Annotations in CiliumNetworkPolicyNodeStatus will be X=Y once the + CNP that was imported corresponding to Annotation X=Y has been realized on + the node. + type: object + enforcing: + description: |- + Enforcing is set to true once all endpoints present at the time the + policy has been imported are enforcing this policy. + type: boolean + error: + description: |- + Error describes any error that occurred when parsing or importing the + policy, or realizing the policy for the endpoints to which it applies + on the node. + type: string + lastUpdated: + description: LastUpdated contains the last time this status + was updated + format: date-time + type: string + localPolicyRevision: + description: |- + Revision is the policy revision of the repository which first implemented + this policy. + format: int64 + type: integer + ok: + description: |- + OK is true when the policy has been parsed and imported successfully + into the in-memory policy repository on the node. 
+ type: boolean + type: object + description: |- + DerivativePolicies is the status of all policies derived from the Cilium + policy + type: object + type: object + required: + - metadata + type: object + served: true + storage: true + subresources: + status: {} diff --git a/test/integration/manifests/cilium/v1.18/ebpf/overlay/cilium.yaml b/test/integration/manifests/cilium/v1.18/ebpf/overlay/cilium.yaml new file mode 100644 index 0000000000..fde0f2bdf6 --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/ebpf/overlay/cilium.yaml @@ -0,0 +1,538 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/actually-managed-by: Eno + app.kubernetes.io/managed-by: Helm + k8s-app: cilium + kubernetes.azure.com/managedby: aks + name: cilium + namespace: kube-system +spec: + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: cilium + template: + metadata: + annotations: + prometheus.io/port: "9962" + prometheus.io/scrape: "true" + labels: + app.kubernetes.io/managed-by: Eno + k8s-app: cilium + kubernetes.azure.com/ebpf-dataplane: cilium + kubernetes.azure.com/managedby: aks + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.azure.com/cluster + operator: Exists + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: kubernetes.io/os + operator: In + values: + - linux + automountServiceAccountToken: true + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + - name: KUBE_CLIENT_BACKOFF_BASE + value: "1" + - name: 
KUBE_CLIENT_BACKOFF_DURATION + value: "120" + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 10 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + - name: require-k8s-connectivity + value: "false" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + name: cilium-agent + ports: + - containerPort: 9962 + hostPort: 9962 + name: prometheus + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + startupProbe: + failureThreshold: 105 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/proc/sys/net + name: host-proc-sys-net + - mountPath: /host/proc/sys/kernel + name: host-proc-sys-kernel + - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: 
xtables-lock + - mountPath: /var/run/cilium/netns + mountPropagation: HostToContainer + name: cilium-netns + - mountPath: /flowlog-config + name: hubble-flowlog-config + readOnly: true + - mountPath: /var/log/acns/hubble + name: networkflowlogs + - mountPath: /etc/config + name: azure-ip-masq-dir + readOnly: true + - command: + - /azure-iptables-monitor + - -v + - "3" + - -events=true + - -checkMap=true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: $AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY/azure-iptables-monitor:$AZURE_IPTABLES_MONITOR_TAG + imagePullPolicy: IfNotPresent + name: azure-iptables-monitor + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/config + name: iptables-config + - mountPath: /azure-block-iptables-bpf-map + name: iptables-block-bpf-map + readOnly: true + - command: + - ./azure-ip-masq-merger + - -v + - "2" + image: $AZURE_IP_MASQ_MERGER_IMAGE_REGISTRY/azure-ip-masq-merger:$AZURE_IP_MASQ_MERGER_TAG + imagePullPolicy: IfNotPresent + name: azure-ip-masq-merger + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/config/ + name: azure-ip-masq-agent-config-volume + - mountPath: /etc/merged-config/ + name: azure-ip-masq-dir + dnsPolicy: ClusterFirst + hostNetwork: true + initContainers: + - command: + - /azure-block-iptables + - -mode=attach + - -overwrite=true + image: $AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY/azure-iptables-monitor:$AZURE_IPTABLES_MONITOR_TAG + imagePullPolicy: IfNotPresent + name: iptables-blocker-init + resources: {} + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /proc + name: hostproc + - command: + - 
/azure-iptables-monitor + - -v + - "3" + - -events=true + - -terminateOnSuccess=true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: $AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY/azure-iptables-monitor:$AZURE_IPTABLES_MONITOR_TAG + imagePullPolicy: IfNotPresent + name: azure-iptables-monitor-init + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/config + name: iptables-config + - command: + - /install-plugin.sh + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: {} + securityContext: + capabilities: + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-cgroup + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt 
"${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - args: + - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf + command: + - /bin/bash + - -c + - -- + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-bpf-fs + resources: {} + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + name: bpf-maps + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: clean-cilium-state + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: 
/run/cilium/cgroupv2 + mountPropagation: HostToContainer + name: cilium-cgroup + - mountPath: /var/run/cilium + name: cilium-run + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /etc/systemd + type: DirectoryOrCreate + name: host-etc-systemd + - hostPath: + path: /lib/systemd + type: DirectoryOrCreate + name: host-lib-systemd + - hostPath: + path: /usr/lib + type: DirectoryOrCreate + name: host-usr-lib + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - name: clustermesh-secrets + secret: + defaultMode: 256 + optional: true + secretName: cilium-clustermesh + - configMap: + defaultMode: 420 + name: cilium-config + name: cilium-config-path + - hostPath: + path: /proc/sys/net + type: Directory + name: host-proc-sys-net + - hostPath: + path: /proc/sys/kernel + type: Directory + name: host-proc-sys-kernel + - hostPath: + path: /var/run/netns + type: DirectoryOrCreate + name: cilium-netns + - configMap: + defaultMode: 420 + name: acns-flowlog-config + optional: true + name: hubble-flowlog-config + - hostPath: + path: /var/log/acns/hubble + type: DirectoryOrCreate + name: 
networkflowlogs + - configMap: + defaultMode: 420 + name: allowed-iptables-patterns + optional: true + name: iptables-config + - hostPath: + path: /sys/fs/bpf/azure-block-iptables + type: DirectoryOrCreate + name: iptables-block-bpf-map + - emptyDir: {} + name: azure-ip-masq-dir + - name: azure-ip-masq-agent-config-volume + projected: + defaultMode: 420 + sources: + - configMap: + items: + - key: ip-masq-agent + mode: 444 + path: ip-masq-agent + name: azure-ip-masq-agent-config + optional: true + - configMap: + items: + - key: ip-masq-agent-reconciled + mode: 444 + path: ip-masq-agent-reconciled + name: azure-ip-masq-agent-config-reconciled + optional: true + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 5% + type: RollingUpdate diff --git a/test/integration/manifests/cilium/v1.18/ebpf/overlay/static/azure-ip-masq-agent-config-reconciled.yaml b/test/integration/manifests/cilium/v1.18/ebpf/overlay/static/azure-ip-masq-agent-config-reconciled.yaml new file mode 100644 index 0000000000..e6d8edca6a --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/ebpf/overlay/static/azure-ip-masq-agent-config-reconciled.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +data: + ip-masq-agent-reconciled: | + MasqLinkLocal: true + NonMasqueradeCIDRs: + - 192.168.0.0/16 +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/managed-by: Eno + component: ip-masq-agent + name: azure-ip-masq-agent-config-reconciled + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.18/ebpf/overlay/static/cilium-config.yaml b/test/integration/manifests/cilium/v1.18/ebpf/overlay/static/cilium-config.yaml new file mode 100644 index 0000000000..ea1192adb6 --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/ebpf/overlay/static/cilium-config.yaml @@ -0,0 +1,173 @@ +apiVersion: v1 +data: + agent-not-ready-taint-key: node.cilium.io/agent-not-ready + arping-refresh-period: 30s + auto-direct-node-routes: "false" + bpf-algorithm-annotation: "false" + 
bpf-events-drop-enabled: "true" + bpf-events-policy-verdict-enabled: "true" + bpf-events-trace-enabled: "true" + bpf-lb-acceleration: disabled + bpf-lb-external-clusterip: "false" + bpf-lb-map-max: "65536" + bpf-lb-mode: snat + bpf-lb-mode-annotation: "false" + bpf-lb-sock: "false" + bpf-lb-sock-hostns-only: "true" + bpf-lb-sock-terminate-pod-connections: "false" + bpf-lb-source-range-all-types: "false" + bpf-map-dynamic-size-ratio: "0.0025" + bpf-policy-map-max: "16384" + bpf-root: /sys/fs/bpf + ces-slice-mode: fcfs + cgroup-root: /run/cilium/cgroupv2 + cilium-endpoint-gc-interval: 5m0s + cluster-id: "0" + cluster-name: alewoverebpfcilcanary + cni-exclusive: "false" + cni-log-file: /var/run/cilium/cilium-cni.log + datapath-mode: veth + debug: "false" + direct-routing-skip-unreachable: "false" + disable-cnp-status-updates: "true" + disable-embedded-dns-proxy: "false" + disable-endpoint-crd: "false" + dnsproxy-enable-transparent-mode: "false" + egress-gateway-reconciliation-trigger-interval: 1s + enable-auto-protect-node-port-range: "true" + enable-bgp-control-plane: "false" + enable-bpf-clock-probe: "true" + enable-bpf-masquerade: "true" + enable-cilium-endpoint-slice: "true" + enable-endpoint-health-checking: "false" + enable-endpoint-lockdown-on-policy-overflow: "false" + enable-endpoint-routes: "true" + enable-experimental-lb: "false" + enable-health-check-loadbalancer-ip: "false" + enable-health-check-nodeport: "true" + enable-health-checking: "true" + enable-host-legacy-routing: "false" + enable-hubble: "true" + enable-hubble-open-metrics: "false" + enable-internal-traffic-policy: "true" + enable-ip-masq-agent: "true" + enable-ipv4: "true" + enable-ipv4-big-tcp: "false" + enable-ipv4-masquerade: "true" + enable-ipv6: "false" + enable-ipv6-big-tcp: "false" + enable-ipv6-masquerade: "false" + enable-k8s-networkpolicy: "true" + enable-k8s-terminating-endpoint: "true" + enable-l2-neigh-discovery: "true" + enable-l7-proxy: "true" + enable-lb-ipam: "false" + 
enable-local-node-route: "false" + enable-local-redirect-policy: "true" + enable-masquerade-to-route-source: "false" + enable-metrics: "true" + enable-node-selector-labels: "false" + enable-non-default-deny-policies: "true" + enable-policy: default + enable-remote-node-identity: "true" + enable-remote-node-masquerade: "true" + enable-runtime-device-detection: "false" + enable-sctp: "false" + enable-session-affinity: "true" + enable-source-ip-verification: "false" + enable-standalone-dns-proxy: "true" + enable-svc-source-range-check: "true" + enable-tcx: "false" + enable-vtep: "false" + enable-well-known-identities: "false" + enable-wireguard: "false" + enable-xt-socket-fallback: "true" + external-envoy-proxy: "false" + health-check-icmp-failure-threshold: "3" + hubble-disable-tls: "false" + hubble-event-buffer-capacity: "4095" + hubble-export-file-max-backups: "5" + hubble-export-file-max-size-mb: "10" + hubble-flowlogs-config-path: /flowlog-config/flowlogs.yaml + hubble-listen-address: :4244 + hubble-metrics: flow:sourceEgressContext=pod;destinationIngressContext=pod tcp:sourceEgressContext=pod;destinationIngressContext=pod + drop:sourceEgressContext=pod;destinationIngressContext=pod dns:sourceEgressContext=pod;destinationIngressContext=pod + hubble-metrics-server: :9965 + hubble-socket-path: /var/run/cilium/hubble.sock + hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt + hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt + hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key + identity-allocation-mode: crd + install-iptables-rules: "true" + install-no-conntrack-iptables-rules: "false" + ipam: delegated-plugin + ipam-cilium-node-update-rate: 15s + ipv4-native-routing-cidr: 192.168.0.0/16 + k8s-client-burst: "20" + k8s-client-qps: "10" + k8s-require-ipv4-pod-cidr: "false" + k8s-require-ipv6-pod-cidr: "false" + kube-proxy-replacement: "true" + kube-proxy-replacement-healthz-bind-address: 0.0.0.0:10256 + local-router-ipv4: 
169.254.23.0 + mesh-auth-enabled: "false" + mesh-auth-gc-interval: 5m0s + mesh-auth-queue-size: "1024" + mesh-auth-rotated-identities-queue-size: "1024" + metrics: +cilium_bpf_map_pressure +cilium_proxy_datapath_update_timeout_total + monitor-aggregation: medium + monitor-aggregation-flags: all + monitor-aggregation-interval: 5s + nat-map-stats-entries: "32" + nat-map-stats-interval: 30s + node-port-bind-protection: "true" + nodeport-addresses: "" + nodes-gc-interval: 5m0s + operator-api-serve-addr: 127.0.0.1:9234 + operator-prometheus-serve-addr: :9963 + preallocate-bpf-maps: "false" + procfs: /host/proc + prometheus-serve-addr: :9962 + proxy-connect-timeout: "2" + proxy-idle-timeout-seconds: "60" + proxy-max-connection-duration-seconds: "0" + proxy-max-requests-per-connection: "0" + proxy-xff-num-trusted-hops-egress: "0" + proxy-xff-num-trusted-hops-ingress: "0" + remove-cilium-node-taints: "true" + routing-mode: native + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" + sidecar-istio-proxy-image: cilium/istio_proxy + synchronize-k8s-nodes: "true" + tofqdns-dns-reject-response-code: refused + tofqdns-enable-dns-compression: "true" + tofqdns-endpoint-max-ip-per-hostname: "1000" + tofqdns-idle-connection-grace-period: 0s + tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "3600" + tofqdns-proxy-port: "40046" + tofqdns-proxy-response-max-delay: 100ms + tofqdns-server-port: "40045" + unmanaged-pod-watcher-interval: "0" + vtep-cidr: "" + vtep-endpoint: "" + vtep-mac: "" + vtep-mask: "" + ## new values for 1.18 + # bpf-policy-stats-map-max specifies the maximum number of entries in global + # policy stats map + bpf-policy-stats-map-max: "65536" + identity-management-mode: "agent" + tofqdns-preallocate-identities: "true" + policy-default-local-cluster: "false" +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/actually-managed-by: 
Eno + app.kubernetes.io/managed-by: Helm + name: cilium-config + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.18/ebpf/podsubnet/cilium.yaml b/test/integration/manifests/cilium/v1.18/ebpf/podsubnet/cilium.yaml new file mode 100644 index 0000000000..0760191fc2 --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/ebpf/podsubnet/cilium.yaml @@ -0,0 +1,508 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/actually-managed-by: Eno + app.kubernetes.io/managed-by: Helm + k8s-app: cilium + kubernetes.azure.com/managedby: aks + name: cilium + namespace: kube-system +spec: + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: cilium + template: + metadata: + annotations: + prometheus.io/port: "9962" + prometheus.io/scrape: "true" + labels: + app.kubernetes.io/managed-by: Eno + k8s-app: cilium + kubernetes.azure.com/ebpf-dataplane: cilium + kubernetes.azure.com/managedby: aks + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.azure.com/cluster + operator: Exists + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: kubernetes.io/os + operator: In + values: + - linux + automountServiceAccountToken: true + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + - name: KUBE_CLIENT_BACKOFF_BASE + value: "1" + - name: KUBE_CLIENT_BACKOFF_DURATION + value: "120" + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + livenessProbe: + 
failureThreshold: 10 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + - name: require-k8s-connectivity + value: "false" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + name: cilium-agent + ports: + - containerPort: 9962 + hostPort: 9962 + name: prometheus + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + startupProbe: + failureThreshold: 105 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/proc/sys/net + name: host-proc-sys-net + - mountPath: /host/proc/sys/kernel + name: host-proc-sys-kernel + - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /var/run/cilium/netns + mountPropagation: HostToContainer + name: cilium-netns + - mountPath: /flowlog-config + name: hubble-flowlog-config + readOnly: 
true + - mountPath: /var/log/acns/hubble + name: networkflowlogs + - mountPath: /etc/config + name: azure-ip-masq-dir + readOnly: true + - command: + - /azure-iptables-monitor + - -v + - "3" + - -events=true + - -checkMap=true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: $AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY/azure-iptables-monitor:$AZURE_IPTABLES_MONITOR_TAG + imagePullPolicy: IfNotPresent + name: azure-iptables-monitor + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/config + name: iptables-config + - mountPath: /azure-block-iptables-bpf-map + name: iptables-block-bpf-map + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + initContainers: + - command: + - /azure-block-iptables + - -mode=attach + - -overwrite=true + image: $AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY/azure-iptables-monitor:$AZURE_IPTABLES_MONITOR_TAG + imagePullPolicy: IfNotPresent + name: iptables-blocker-init + resources: {} + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /proc + name: hostproc + - command: + - /azure-iptables-monitor + - -v + - "3" + - -events=true + - -terminateOnSuccess=true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: $AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY/azure-iptables-monitor:$AZURE_IPTABLES_MONITOR_TAG + imagePullPolicy: IfNotPresent + name: azure-iptables-monitor-init + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/config + name: iptables-config + - command: + - /install-plugin.sh + image: 
$CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: {} + securityContext: + capabilities: + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-cgroup + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + resources: {} + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc 
+ - mountPath: /hostbin + name: cni-path + - args: + - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf + command: + - /bin/bash + - -c + - -- + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: mount-bpf-fs + resources: {} + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + name: bpf-maps + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG + imagePullPolicy: IfNotPresent + name: clean-cilium-state + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + appArmorProfile: + type: Unconfined + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + mountPropagation: HostToContainer + name: cilium-cgroup + - mountPath: /var/run/cilium + name: cilium-run + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /etc/systemd + type: DirectoryOrCreate + name: host-etc-systemd + - hostPath: + path: 
/lib/systemd + type: DirectoryOrCreate + name: host-lib-systemd + - hostPath: + path: /usr/lib + type: DirectoryOrCreate + name: host-usr-lib + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - name: clustermesh-secrets + secret: + defaultMode: 256 + optional: true + secretName: cilium-clustermesh + - configMap: + defaultMode: 420 + name: cilium-config + name: cilium-config-path + - hostPath: + path: /proc/sys/net + type: Directory + name: host-proc-sys-net + - hostPath: + path: /proc/sys/kernel + type: Directory + name: host-proc-sys-kernel + - hostPath: + path: /var/run/netns + type: DirectoryOrCreate + name: cilium-netns + - configMap: + defaultMode: 420 + name: acns-flowlog-config + optional: true + name: hubble-flowlog-config + - hostPath: + path: /var/log/acns/hubble + type: DirectoryOrCreate + name: networkflowlogs + - configMap: + defaultMode: 420 + name: allowed-iptables-patterns + optional: true + name: iptables-config + - hostPath: + path: /sys/fs/bpf/azure-block-iptables + type: DirectoryOrCreate + name: iptables-block-bpf-map + - configMap: + defaultMode: 420 + name: azure-dns-imds-ip-masq-agent-config + optional: true + name: azure-ip-masq-dir + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 5% + type: RollingUpdate diff --git a/test/integration/manifests/cilium/v1.18/ebpf/podsubnet/static/azure-dns-imds-ip-masq-agent-config.yaml 
b/test/integration/manifests/cilium/v1.18/ebpf/podsubnet/static/azure-dns-imds-ip-masq-agent-config.yaml new file mode 100644 index 0000000000..2613c3b265 --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/ebpf/podsubnet/static/azure-dns-imds-ip-masq-agent-config.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +data: + ip-masq-agent: "nonMasqueradeCIDRs:\n- 0.0.0.0/1\n- 128.0.0.0/3\n- 160.0.0.0/5\n- + 168.0.0.0/11\n- 168.32.0.0/12\n- 168.48.0.0/13\n- 168.56.0.0/14\n- 168.60.0.0/15\n- + 168.62.0.0/16\n- 168.63.0.0/17\n- 168.63.128.0/24\n- 168.63.129.0/29\n- 168.63.129.8/30\n- + 168.63.129.12/30\n- 168.63.129.17/32\n- 168.63.129.18/31\n- 168.63.129.20/30\n- + 168.63.129.24/29\n- 168.63.129.32/27\n- 168.63.129.64/26\n- 168.63.129.128/25\n- + 168.63.130.0/23\n- 168.63.132.0/22\n- 168.63.136.0/21\n- 168.63.144.0/20\n- 168.63.160.0/19\n- + 168.63.192.0/18\n- 168.64.0.0/10\n- 168.128.0.0/9\n- 169.0.0.0/9\n- 169.128.0.0/10\n- + 169.192.0.0/11\n- 169.224.0.0/12\n- 169.240.0.0/13\n- 169.248.0.0/14\n- 169.252.0.0/15\n- + 169.254.0.0/17\n- 169.254.128.0/19\n- 169.254.160.0/21\n- 169.254.168.0/24\n- + 169.254.169.0/25\n- 169.254.169.128/26\n- 169.254.169.192/27\n- 169.254.169.224/28\n- + 169.254.169.240/29\n- 169.254.169.248/30\n- 169.254.169.252/31\n- 169.254.169.255/32\n- + 169.254.170.0/23\n- 169.254.172.0/22\n- 169.254.176.0/20\n- 169.254.192.0/18\n- + 169.255.0.0/16\n- 170.0.0.0/7\n- 172.0.0.0/6\n- 176.0.0.0/4\n- 192.0.0.0/3\n- + 224.0.0.0/3\n \nmasqLinkLocal: true\nmasqLinkLocalIPv6: true\n" +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/actually-managed-by: Eno + app.kubernetes.io/managed-by: Helm + name: azure-dns-imds-ip-masq-agent-config + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.18/ebpf/podsubnet/static/cilium-config.yaml b/test/integration/manifests/cilium/v1.18/ebpf/podsubnet/static/cilium-config.yaml new file mode 100644 index 
0000000000..33438fb05c --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/ebpf/podsubnet/static/cilium-config.yaml @@ -0,0 +1,173 @@ +apiVersion: v1 +data: + agent-not-ready-taint-key: node.cilium.io/agent-not-ready + arping-refresh-period: 30s + auto-direct-node-routes: "false" + bpf-algorithm-annotation: "false" + bpf-events-drop-enabled: "true" + bpf-events-policy-verdict-enabled: "true" + bpf-events-trace-enabled: "true" + bpf-lb-acceleration: disabled + bpf-lb-external-clusterip: "false" + bpf-lb-map-max: "65536" + bpf-lb-mode: snat + bpf-lb-mode-annotation: "false" + bpf-lb-sock: "false" + bpf-lb-sock-hostns-only: "true" + bpf-lb-sock-terminate-pod-connections: "false" + bpf-lb-source-range-all-types: "false" + bpf-map-dynamic-size-ratio: "0.0025" + bpf-policy-map-max: "16384" + bpf-root: /sys/fs/bpf + ces-slice-mode: fcfs + cgroup-root: /run/cilium/cgroupv2 + cilium-endpoint-gc-interval: 5m0s + cluster-id: "0" + cluster-name: alewpodsubebpfcilcanary + cni-exclusive: "false" + cni-log-file: /var/run/cilium/cilium-cni.log + datapath-mode: veth + debug: "false" + direct-routing-skip-unreachable: "false" + disable-cnp-status-updates: "true" + disable-embedded-dns-proxy: "false" + disable-endpoint-crd: "false" + dnsproxy-enable-transparent-mode: "false" + egress-gateway-reconciliation-trigger-interval: 1s + enable-auto-protect-node-port-range: "true" + enable-bgp-control-plane: "false" + enable-bpf-clock-probe: "true" + enable-bpf-masquerade: "true" + enable-cilium-endpoint-slice: "true" + enable-endpoint-health-checking: "false" + enable-endpoint-lockdown-on-policy-overflow: "false" + enable-endpoint-routes: "true" + enable-experimental-lb: "false" + enable-health-check-loadbalancer-ip: "false" + enable-health-check-nodeport: "true" + enable-health-checking: "true" + enable-host-legacy-routing: "false" + enable-hubble: "true" + enable-hubble-open-metrics: "false" + enable-internal-traffic-policy: "true" + enable-ip-masq-agent: "true" + enable-ipv4: 
"true" + enable-ipv4-big-tcp: "false" + enable-ipv4-masquerade: "true" + enable-ipv6: "false" + enable-ipv6-big-tcp: "false" + enable-ipv6-masquerade: "false" + enable-k8s-networkpolicy: "true" + enable-k8s-terminating-endpoint: "true" + enable-l2-neigh-discovery: "true" + enable-l7-proxy: "true" + enable-lb-ipam: "false" + enable-local-node-route: "false" + enable-local-redirect-policy: "true" + enable-masquerade-to-route-source: "false" + enable-metrics: "true" + enable-node-selector-labels: "false" + enable-non-default-deny-policies: "true" + enable-policy: default + enable-remote-node-identity: "true" + enable-remote-node-masquerade: "false" + enable-runtime-device-detection: "false" + enable-sctp: "false" + enable-session-affinity: "true" + enable-source-ip-verification: "false" + enable-standalone-dns-proxy: "true" + enable-svc-source-range-check: "true" + enable-tcx: "false" + enable-vtep: "false" + enable-well-known-identities: "false" + enable-wireguard: "false" + enable-xt-socket-fallback: "true" + external-envoy-proxy: "false" + health-check-icmp-failure-threshold: "3" + hubble-disable-tls: "false" + hubble-event-buffer-capacity: "4095" + hubble-export-file-max-backups: "5" + hubble-export-file-max-size-mb: "10" + hubble-flowlogs-config-path: /flowlog-config/flowlogs.yaml + hubble-listen-address: :4244 + hubble-metrics: flow:sourceEgressContext=pod;destinationIngressContext=pod tcp:sourceEgressContext=pod;destinationIngressContext=pod + drop:sourceEgressContext=pod;destinationIngressContext=pod dns:sourceEgressContext=pod;destinationIngressContext=pod + hubble-metrics-server: :9965 + hubble-socket-path: /var/run/cilium/hubble.sock + hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt + hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt + hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key + identity-allocation-mode: crd + install-iptables-rules: "true" + install-no-conntrack-iptables-rules: "false" + ipam: 
delegated-plugin + ipam-cilium-node-update-rate: 15s + ipv4-native-routing-cidr: 10.241.0.0/16 + k8s-client-burst: "20" + k8s-client-qps: "10" + k8s-require-ipv4-pod-cidr: "false" + k8s-require-ipv6-pod-cidr: "false" + kube-proxy-replacement: "true" + kube-proxy-replacement-healthz-bind-address: 0.0.0.0:10256 + local-router-ipv4: 169.254.23.0 + mesh-auth-enabled: "false" + mesh-auth-gc-interval: 5m0s + mesh-auth-queue-size: "1024" + mesh-auth-rotated-identities-queue-size: "1024" + metrics: +cilium_bpf_map_pressure +cilium_proxy_datapath_update_timeout_total + monitor-aggregation: medium + monitor-aggregation-flags: all + monitor-aggregation-interval: 5s + nat-map-stats-entries: "32" + nat-map-stats-interval: 30s + node-port-bind-protection: "true" + nodeport-addresses: "" + nodes-gc-interval: 5m0s + operator-api-serve-addr: 127.0.0.1:9234 + operator-prometheus-serve-addr: :9963 + preallocate-bpf-maps: "false" + procfs: /host/proc + prometheus-serve-addr: :9962 + proxy-connect-timeout: "2" + proxy-idle-timeout-seconds: "60" + proxy-max-connection-duration-seconds: "0" + proxy-max-requests-per-connection: "0" + proxy-xff-num-trusted-hops-egress: "0" + proxy-xff-num-trusted-hops-ingress: "0" + remove-cilium-node-taints: "true" + routing-mode: native + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" + sidecar-istio-proxy-image: cilium/istio_proxy + synchronize-k8s-nodes: "true" + tofqdns-dns-reject-response-code: refused + tofqdns-enable-dns-compression: "true" + tofqdns-endpoint-max-ip-per-hostname: "1000" + tofqdns-idle-connection-grace-period: 0s + tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "3600" + tofqdns-proxy-port: "40046" + tofqdns-proxy-response-max-delay: 100ms + tofqdns-server-port: "40045" + unmanaged-pod-watcher-interval: "0" + vtep-cidr: "" + vtep-endpoint: "" + vtep-mac: "" + vtep-mask: "" + ## new values for 1.18 + # bpf-policy-stats-map-max specifies the maximum number of entries in global + # policy stats 
map + bpf-policy-stats-map-max: "65536" + identity-management-mode: "agent" + tofqdns-preallocate-identities: "true" + policy-default-local-cluster: "false" +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/actually-managed-by: Eno + app.kubernetes.io/managed-by: Helm + name: cilium-config + namespace: kube-system diff --git a/test/integration/manifests/cilium/v1.18/hubble/hubble-peer-svc.yaml b/test/integration/manifests/cilium/v1.18/hubble/hubble-peer-svc.yaml new file mode 100644 index 0000000000..6ba733885c --- /dev/null +++ b/test/integration/manifests/cilium/v1.18/hubble/hubble-peer-svc.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: cilium + name: hubble-peer + namespace: kube-system +spec: + internalTrafficPolicy: Cluster + ports: + - name: peer-service + port: 443 + protocol: TCP + targetPort: 4244 + selector: + k8s-app: cilium + sessionAffinity: None + type: ClusterIP From e4fe6eaaea967f7dff979b2d1d33b1a0186e24ca Mon Sep 17 00:00:00 2001 From: rejain789 Date: Wed, 26 Nov 2025 11:25:35 -0800 Subject: [PATCH 25/47] [NPM Lite] Bypassing IPSets for IP CIDR Block Based Network Policies (#4107) * added logic to bypass ipsets for /32 cidrs with npm lite * removed logic to only look at /32 pod cidrs and allow all pod cidr * updated code specific to direct ip logic * fixed if else logic * added error for named port * get rid of unneeded comments * got rid of function in utils that was not neede * added unit test for translate policy * resolved pr comments * resolved copilot comments * fixed golinter --- .../translation/translatePolicy.go | 75 +++- .../translation/translatePolicy_test.go | 375 ++++++++++-------- npm/pkg/dataplane/policies/policy.go | 8 +- npm/pkg/dataplane/policies/policy_windows.go | 32 +- 4 files changed, 298 insertions(+), 192 deletions(-) diff --git a/npm/pkg/controlplane/translation/translatePolicy.go 
b/npm/pkg/controlplane/translation/translatePolicy.go index 9b029b4616..ac156d7592 100644 --- a/npm/pkg/controlplane/translation/translatePolicy.go +++ b/npm/pkg/controlplane/translation/translatePolicy.go @@ -349,7 +349,7 @@ func peerAndPortRule(npmNetPol *policies.NPMNetworkPolicy, direction policies.Di return err } - err = checkForNamedPortType(portKind, npmLiteToggle) + err = checkForNamedPortType(npmNetPol, portKind, npmLiteToggle, direction, &ports[i], "") if err != nil { return err } @@ -362,6 +362,50 @@ func peerAndPortRule(npmNetPol *policies.NPMNetworkPolicy, direction policies.Di return nil } +func directPeerAndPortAllowRule(npmNetPol *policies.NPMNetworkPolicy, direction policies.Direction, ports []networkingv1.NetworkPolicyPort, cidr string, npmLiteToggle bool) error { + if len(ports) == 0 { + acl := policies.NewACLPolicy(policies.Allowed, direction) + // bypasses ipset creation for /32 cidrs and directly creates an acl with the cidr + if direction == policies.Ingress { + acl.SrcDirectIPs = []string{cidr} + } else { + acl.DstDirectIPs = []string{cidr} + } + npmNetPol.ACLs = append(npmNetPol.ACLs, acl) + return nil + } + // handle each port separately + for i := range ports { + portKind, err := portType(ports[i]) + if err != nil { + return err + } + + err = checkForNamedPortType(npmNetPol, portKind, npmLiteToggle, direction, &ports[i], cidr) + if err != nil { + return err + } + + acl := policies.NewACLPolicy(policies.Allowed, direction) + + // Set direct IP based on direction + if direction == policies.Ingress { + acl.SrcDirectIPs = []string{cidr} + } else { + acl.DstDirectIPs = []string{cidr} + } + + // Handle ports + if portKind == numericPortType { + portInfo, protocol := numericPortRule(&ports[i]) + acl.DstPorts = portInfo + acl.Protocol = policies.Protocol(protocol) + } + npmNetPol.ACLs = append(npmNetPol.ACLs, acl) + } + return nil +} + // translateRule translates ingress or egress rules and update npmNetPol object. 
func translateRule(npmNetPol *policies.NPMNetworkPolicy, netPolName string, @@ -405,6 +449,14 @@ func translateRule(npmNetPol *policies.NPMNetworkPolicy, // #2.1 Handle IPBlock and port if exist if peer.IPBlock != nil { if len(peer.IPBlock.CIDR) > 0 { + if npmLiteToggle { + err = directPeerAndPortAllowRule(npmNetPol, direction, ports, peer.IPBlock.CIDR, npmLiteToggle) + if err != nil { + return err + } + continue + } + ipBlockIPSet, ipBlockSetInfo, err := ipBlockRule(netPolName, npmNetPol.Namespace, direction, matchType, ruleIndex, peerIdx, peer.IPBlock) if err != nil { return err @@ -417,12 +469,6 @@ func translateRule(npmNetPol *policies.NPMNetworkPolicy, } } - // if npm lite is configured, check network policy only consists of CIDR blocks - err := npmLiteValidPolicy(peer, npmLiteToggle) - if err != nil { - return err - } - // Do not need to run below code to translate PodSelector and NamespaceSelector // since IPBlock field is exclusive in NetworkPolicyPeer (i.e., peer in this code). 
@@ -642,17 +688,10 @@ func TranslatePolicy(npObj *networkingv1.NetworkPolicy, npmLiteToggle bool) (*po return npmNetPol, nil } -// validates only CIDR based peer is present + no combination of CIDR with pod/namespace selectors are present -func npmLiteValidPolicy(peer networkingv1.NetworkPolicyPeer, npmLiteEnabled bool) error { - if npmLiteEnabled && (peer.PodSelector != nil || peer.NamespaceSelector != nil) { - return ErrUnsupportedNonCIDR - } - return nil -} - -func checkForNamedPortType(portKind netpolPortType, npmLiteToggle bool) error { +func checkForNamedPortType(npmNetPol *policies.NPMNetworkPolicy, portKind netpolPortType, npmLiteToggle bool, direction policies.Direction, port *networkingv1.NetworkPolicyPort, cidr string) error { if npmLiteToggle && portKind == namedPortType { - return ErrUnsupportedNonCIDR + return fmt.Errorf("named port not supported in policy %s (namespace: %s, direction: %s, cidr: %s, port: %v, protocol: %v): %w", + npmNetPol.PolicyKey, npmNetPol.Namespace, direction, cidr, port.Port, port.Protocol, ErrUnsupportedNamedPort) } return nil } @@ -673,7 +712,7 @@ func checkOnlyPortRuleExists( if err != nil { return err } - err = checkForNamedPortType(portKind, npmLiteToggle) + err = checkForNamedPortType(npmNetPol, portKind, npmLiteToggle, direction, &ports[i], "") if err != nil { return err } diff --git a/npm/pkg/controlplane/translation/translatePolicy_test.go b/npm/pkg/controlplane/translation/translatePolicy_test.go index dc49c7bec3..0fa8fa11f7 100644 --- a/npm/pkg/controlplane/translation/translatePolicy_test.go +++ b/npm/pkg/controlplane/translation/translatePolicy_test.go @@ -1458,6 +1458,194 @@ func TestPeerAndPortRule(t *testing.T) { } } +func TestDirectPeerAndPortAllowRule(t *testing.T) { + namedPort := intstr.FromString(namedPortStr) + port8000 := intstr.FromInt(8000) + var endPort int32 = 8100 + tcp := v1.ProtocolTCP + + tests := []struct { + name string + direction policies.Direction + ports []networkingv1.NetworkPolicyPort + 
cidr string + npmNetPol *policies.NPMNetworkPolicy + skipWindows bool + }{ + { + name: "egress tcp port 8000-8100 with /28 subnet", + direction: policies.Egress, + ports: []networkingv1.NetworkPolicyPort{ + { + Protocol: &tcp, + Port: &port8000, + EndPort: &endPort, + }, + }, + cidr: "10.0.1.0/28", + npmNetPol: &policies.NPMNetworkPolicy{ + Namespace: defaultNS, + PolicyKey: namedPortPolicyKey, + ACLPolicyID: fmt.Sprintf("azure-acl-%s-%s", defaultNS, namedPortPolicyKey), + ACLs: []*policies.ACLPolicy{ + { + Target: policies.Allowed, + Direction: policies.Egress, + DstDirectIPs: []string{"10.0.1.0/28"}, + DstPorts: policies.Ports{ + Port: 8000, + EndPort: 8100, + }, + Protocol: "TCP", + }, + }, + }, + }, + { + name: "ingress no ports - single IP (/32)", + direction: policies.Ingress, + ports: []networkingv1.NetworkPolicyPort{}, + cidr: "10.226.0.49/32", + npmNetPol: &policies.NPMNetworkPolicy{ + Namespace: defaultNS, + PolicyKey: namedPortPolicyKey, + ACLPolicyID: fmt.Sprintf("azure-acl-%s-%s", defaultNS, namedPortPolicyKey), + ACLs: []*policies.ACLPolicy{ + { + Target: policies.Allowed, + Direction: policies.Ingress, + SrcDirectIPs: []string{"10.226.0.49/32"}, + }, + }, + }, + }, + { + name: "egress no ports - subnet (/24)", + direction: policies.Egress, + ports: []networkingv1.NetworkPolicyPort{}, + cidr: "192.168.1.0/24", + npmNetPol: &policies.NPMNetworkPolicy{ + Namespace: defaultNS, + PolicyKey: namedPortPolicyKey, + ACLPolicyID: fmt.Sprintf("azure-acl-%s-%s", defaultNS, namedPortPolicyKey), + ACLs: []*policies.ACLPolicy{ + { + Target: policies.Allowed, + Direction: policies.Egress, + DstDirectIPs: []string{"192.168.1.0/24"}, + }, + }, + }, + }, + { + name: "ingress no ports - large subnet (/16)", + direction: policies.Ingress, + ports: []networkingv1.NetworkPolicyPort{}, + cidr: "172.16.0.0/16", + npmNetPol: &policies.NPMNetworkPolicy{ + Namespace: defaultNS, + PolicyKey: namedPortPolicyKey, + ACLPolicyID: fmt.Sprintf("azure-acl-%s-%s", defaultNS, 
namedPortPolicyKey), + ACLs: []*policies.ACLPolicy{ + { + Target: policies.Allowed, + Direction: policies.Ingress, + SrcDirectIPs: []string{"172.16.0.0/16"}, + }, + }, + }, + }, + { + name: "egress tcp port 8000-8100 with /28 subnet", + direction: policies.Egress, + ports: []networkingv1.NetworkPolicyPort{ + { + Protocol: &tcp, + Port: &port8000, + EndPort: &endPort, + }, + }, + cidr: "10.0.1.0/28", + npmNetPol: &policies.NPMNetworkPolicy{ + Namespace: defaultNS, + PolicyKey: namedPortPolicyKey, + ACLPolicyID: fmt.Sprintf("azure-acl-%s-%s", defaultNS, namedPortPolicyKey), + ACLs: []*policies.ACLPolicy{ + { + Target: policies.Allowed, + Direction: policies.Egress, + DstDirectIPs: []string{"10.0.1.0/28"}, + DstPorts: policies.Ports{ + Port: 8000, + EndPort: 8100, + }, + Protocol: "TCP", + }, + }, + }, + }, + { + name: "ingress udp port 53 with /32", + direction: policies.Ingress, + ports: []networkingv1.NetworkPolicyPort{ + { + Protocol: &[]v1.Protocol{v1.ProtocolUDP}[0], + Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53}, + }, + }, + cidr: "8.8.8.8/32", + npmNetPol: &policies.NPMNetworkPolicy{ + Namespace: defaultNS, + PolicyKey: namedPortPolicyKey, + ACLPolicyID: fmt.Sprintf("azure-acl-%s-%s", defaultNS, namedPortPolicyKey), + ACLs: []*policies.ACLPolicy{ + { + Target: policies.Allowed, + Direction: policies.Ingress, + SrcDirectIPs: []string{"8.8.8.8/32"}, + DstPorts: policies.Ports{ + Port: 53, + EndPort: 0, + }, + Protocol: "UDP", + }, + }, + }, + }, + { + name: "named port should fail in NPM Lite", + direction: policies.Ingress, + ports: []networkingv1.NetworkPolicyPort{ + { + Protocol: &tcp, + Port: &namedPort, + }, + }, + cidr: "10.226.0.49/32", + skipWindows: true, // Should fail on both platforms + }, + } + + for _, tt := range tests { + npmLiteToggle := true + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + npmNetPol := &policies.NPMNetworkPolicy{ + Namespace: defaultNS, + PolicyKey: namedPortPolicyKey, + ACLPolicyID: 
fmt.Sprintf("azure-acl-%s-%s", defaultNS, namedPortPolicyKey), + } + err := directPeerAndPortAllowRule(npmNetPol, tt.direction, tt.ports, tt.cidr, npmLiteToggle) + if tt.skipWindows { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.npmNetPol, npmNetPol) + } + }) + } +} + func TestIngressPolicy(t *testing.T) { tcp := v1.ProtocolTCP targetPodMatchType := policies.EitherMatch @@ -2921,169 +3109,6 @@ func TestEgressPolicy(t *testing.T) { } } -func TestNpmLiteCidrPolicy(t *testing.T) { - // Test 1) Npm lite enabled, CIDR + Namespace label Peers, returns error - // Test 2) NPM lite disabled, CIDR + Namespace label Peers, returns no error - // Test 3) Npm Lite enabled, CIDR Peers , returns no error - // Test 4) NPM Lite enabled, Combination of CIDR + Label in same peer, returns an error - // test 5) NPM Lite enabled, no peer, returns no error - // test 6) NPM Lite enabled, no cidr, no peer, only ports + protocol - - port8000 := intstr.FromInt(8000) - tcp := v1.ProtocolTCP - tests := []struct { - name string - targetSelector *metav1.LabelSelector - ports []networkingv1.NetworkPolicyPort - peersFrom []networkingv1.NetworkPolicyPeer - peersTo []networkingv1.NetworkPolicyPeer - npmLiteEnabled bool - wantErr bool - }{ - { - name: "CIDR + port + namespace", - targetSelector: nil, - ports: []networkingv1.NetworkPolicyPort{ - { - Protocol: &tcp, - Port: &port8000, - }, - }, - peersFrom: []networkingv1.NetworkPolicyPeer{ - { - NamespaceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "peer-nsselector-kay": "peer-nsselector-value", - }, - }, - }, - { - IPBlock: &networkingv1.IPBlock{ - CIDR: "172.17.0.0/16", - Except: []string{"172.17.1.0/24", "172.17.2.0/24"}, - }, - }, - { - IPBlock: &networkingv1.IPBlock{ - CIDR: "172.17.0.0/16", - }, - }, - }, - peersTo: []networkingv1.NetworkPolicyPeer{}, - npmLiteEnabled: true, - wantErr: true, - }, - { - name: "cidr + namespace label + disabledLite ", - targetSelector: nil, - peersFrom: 
[]networkingv1.NetworkPolicyPeer{ - { - NamespaceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "peer-nsselector-kay": "peer-nsselector-value", - }, - }, - }, - { - IPBlock: &networkingv1.IPBlock{ - CIDR: "172.17.0.0/16", - Except: []string{"172.17.1.0/24", "172.17.2.0/24"}, - }, - }, - { - IPBlock: &networkingv1.IPBlock{ - CIDR: "172.17.0.0/16", - }, - }, - }, - peersTo: []networkingv1.NetworkPolicyPeer{}, - npmLiteEnabled: false, - wantErr: false, - }, - { - name: "CIDR Only", - targetSelector: nil, - peersFrom: []networkingv1.NetworkPolicyPeer{ - { - IPBlock: &networkingv1.IPBlock{ - CIDR: "172.17.0.0/16", - Except: []string{"172.17.1.0/24", "172.17.2.0/24"}, - }, - }, - { - IPBlock: &networkingv1.IPBlock{ - CIDR: "172.17.0.0/16", - }, - }, - }, - peersTo: []networkingv1.NetworkPolicyPeer{}, - npmLiteEnabled: true, - wantErr: false, - }, - { - name: "CIDR + namespace labels", - targetSelector: nil, - peersFrom: []networkingv1.NetworkPolicyPeer{ - { - IPBlock: &networkingv1.IPBlock{ - CIDR: "172.17.0.0/17", - Except: []string{"172.17.1.0/24", "172.17.2.0/24"}, - }, - NamespaceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "peer-nsselector-kay": "peer-nsselector-value", - }, - }, - }, - }, - peersTo: []networkingv1.NetworkPolicyPeer{}, - npmLiteEnabled: true, - wantErr: true, - }, - { - name: "no peers", - targetSelector: nil, - peersFrom: []networkingv1.NetworkPolicyPeer{}, - peersTo: []networkingv1.NetworkPolicyPeer{}, - npmLiteEnabled: true, - wantErr: false, - }, - { - name: "port only", - targetSelector: nil, - ports: []networkingv1.NetworkPolicyPort{ - { - Protocol: &tcp, - Port: &port8000, - }, - }, - peersFrom: []networkingv1.NetworkPolicyPeer{}, - peersTo: []networkingv1.NetworkPolicyPeer{}, - npmLiteEnabled: true, - wantErr: false, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - // run the function passing in peers and a flag indicating whether npm lite is enabled - 
var err error - for _, peer := range tt.peersFrom { - err = npmLiteValidPolicy(peer, tt.npmLiteEnabled) - if err != nil { - break - } - } - if tt.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) - } - }) - } -} - func TestCheckForNamedPortType(t *testing.T) { port8000 := intstr.FromInt(8000) namedPort := intstr.FromString("namedPort") @@ -3127,8 +3152,28 @@ func TestCheckForNamedPortType(t *testing.T) { for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { - // run the function passing in peers and a flag indicating whether npm lite is enabled - err := checkForNamedPortType(tt.portKind, tt.npmLiteEnabled) + // Create a mock NPM network policy for testing + npmNetPol := &policies.NPMNetworkPolicy{ + PolicyKey: "test-policy/test", + Namespace: "test-namespace", + } + + // Use the first port from test data, or create a default one if ports are empty + var testPort *networkingv1.NetworkPolicyPort + if len(tt.ports) > 0 { + testPort = &tt.ports[0] + } else { + // Create a default port for tests without specific port data + port := intstr.FromInt(8080) + protocol := v1.ProtocolTCP + testPort = &networkingv1.NetworkPolicyPort{ + Protocol: &protocol, + Port: &port, + } + } + + // run the function passing in all required parameters + err := checkForNamedPortType(npmNetPol, tt.portKind, tt.npmLiteEnabled, policies.Ingress, testPort, "10.0.0.0/24") if tt.wantErr { require.Error(t, err) } else { diff --git a/npm/pkg/dataplane/policies/policy.go b/npm/pkg/dataplane/policies/policy.go index 646a03633a..d36b3530c5 100644 --- a/npm/pkg/dataplane/policies/policy.go +++ b/npm/pkg/dataplane/policies/policy.go @@ -114,6 +114,10 @@ type ACLPolicy struct { SrcList []SetInfo // DstList destination IPSets condition setinfos DstList []SetInfo + // SrcDirectIPs holds direct IPs for source matching (used for /32 CIDRs on Windows with npm lite enabled) + SrcDirectIPs []string + // DstDirectIPs holds direct IPs for destination matching (used for /32 
CIDRs on Windows with npm lite enabled) + DstDirectIPs []string // Target defines a target in iptables for linux. i,e, Mark, Accept, Drop // in windows, this is either ALLOW or DENY Target Verdict @@ -282,7 +286,9 @@ func translatedIPSetsToString(items []*ipsets.TranslatedIPSet) string { // Included is false when match set have "!". // MatchType captures match direction flags. // For example match set in linux: -// ! azure-npm-123 src +// +// ! azure-npm-123 src +// // "!" this indicates a negative match (Included is false) of an azure-npm-123 // MatchType is "src" type SetInfo struct { diff --git a/npm/pkg/dataplane/policies/policy_windows.go b/npm/pkg/dataplane/policies/policy_windows.go index 5f4fbd16ff..9e23dcb92d 100644 --- a/npm/pkg/dataplane/policies/policy_windows.go +++ b/npm/pkg/dataplane/policies/policy_windows.go @@ -3,6 +3,7 @@ package policies import ( "errors" "fmt" + "strings" "github.com/Azure/azure-container-networking/npm/pkg/dataplane/ipsets" "github.com/Microsoft/hcsshim/hcn" @@ -100,12 +101,7 @@ func (acl *ACLPolicy) convertToAclSettings(aclID string) (*NPMACLPolSettings, er // Ignore adding ruletype for now as there is a bug // policySettings.RuleType = hcn.RuleTypeSwitch - // ACLPolicy settings uses ID field of SetPolicy in LocalAddresses or RemoteAddresses - srcListStr := getAddrListFromSetInfo(acl.SrcList) - dstListStr := getAddrListFromSetInfo(acl.DstList) - dstPortStr := getPortStrFromPorts(acl.DstPorts) - - // HNS has confusing Local and Remote address defintions + // HNS has confusing Local and Remote address definitions // For Traffic Direction INGRESS // LocalAddresses = Source Sets // RemoteAddresses = Destination Sets @@ -126,8 +122,28 @@ func (acl *ACLPolicy) convertToAclSettings(aclID string) (*NPMACLPolSettings, er // LocalAddresses = Destination IPs // RemoteAddresses = Source IPs - policySettings.LocalAddresses = srcListStr - policySettings.RemoteAddresses = dstListStr + var srcListStr, dstListStr string + // if direct IPs are 
used, we leave local addresses to be an empty string + if len(acl.SrcDirectIPs) > 0 || len(acl.DstDirectIPs) > 0 { + srcListStr = strings.Join(acl.SrcDirectIPs, ",") + dstListStr = strings.Join(acl.DstDirectIPs, ",") + policySettings.LocalAddresses = "" + if policySettings.Direction == hcn.DirectionTypeOut { + // EGRESS: Remote = Destination IPs from policy + policySettings.RemoteAddresses = dstListStr + } else { + // INGRESS: Remote = Source IPs from policy + policySettings.RemoteAddresses = srcListStr + } + } else { + // Original IPSet-based approach + srcListStr = getAddrListFromSetInfo(acl.SrcList) + dstListStr = getAddrListFromSetInfo(acl.DstList) + policySettings.LocalAddresses = srcListStr + policySettings.RemoteAddresses = dstListStr + } + + dstPortStr := getPortStrFromPorts(acl.DstPorts) // Switch ports based on direction policySettings.RemotePorts = "" From 204ff19328d54b39af754b1c398e6de4a622443e Mon Sep 17 00:00:00 2001 From: Santhosh Prabhu <6684582+santhoshmprabhu@users.noreply.github.com> Date: Thu, 4 Dec 2025 11:41:13 -0800 Subject: [PATCH 26/47] fix: do not copy empty values into secondary IP configs (#4155) * fix: do not copy empty values into secondary IP configs * chore: cleanup * fix: keep loadBalanacerBackendAddressPools --- .../integration/cilium-nodesubnet/ipconfigupdate.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/test/integration/cilium-nodesubnet/ipconfigupdate.go b/test/integration/cilium-nodesubnet/ipconfigupdate.go index 214f4655f7..1acf5822dc 100644 --- a/test/integration/cilium-nodesubnet/ipconfigupdate.go +++ b/test/integration/cilium-nodesubnet/ipconfigupdate.go @@ -100,14 +100,21 @@ func main() { } if primaryIPConfig != nil { - for i := 2; i <= secondaryConfigCount+1; i++ { + ipConfigurations = []interface{}{} + for i := 1; i <= secondaryConfigCount+1; i++ { ipConfig := make(map[string]interface{}) for k, v := range primaryIPConfig { + // Skip nil values. 
+ if v == nil { + continue + } + // only the primary config needs loadBalancerBackendAddressPools. Azure doesn't allow // secondary IP configs to be associated load balancer backend pools. - if k == "loadBalancerBackendAddressPools" { + if i > 1 && k == "loadBalancerBackendAddressPools" { continue } + ipConfig[k] = v } @@ -117,6 +124,8 @@ func main() { ipConfig["primary"] = false usedIPConfigNames = append(usedIPConfigNames, ipConfigName) secondaryConfigs = append(secondaryConfigs, ipConfig) + } else { + ipConfigurations = append(ipConfigurations, ipConfig) } } } From 4ca7012b7871381cad6ec4ef0fc3f898c19f78f8 Mon Sep 17 00:00:00 2001 From: John Payne <89417863+jpayne3506@users.noreply.github.com> Date: Thu, 4 Dec 2025 13:43:14 -0600 Subject: [PATCH 27/47] fix: Re-enable codespaces (#4124) * test: 1.24.1 * test: bullseye * test: trixie | 6.12 * test: revert devcontainers/features/go version * test: revert trixie, no build * ci: bump devcontainer go features to 1.24.1 --- .devcontainer/devcontainer.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index d81b9acd61..dd69f1c3db 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -5,7 +5,7 @@ "build": { "dockerfile": "Dockerfile", "args": { - "VARIANT": "1.24", + "VARIANT": "1.24-bullseye", "NODE_VERSION": "none" } }, @@ -58,7 +58,7 @@ "remoteUser": "vscode", "features": { "ghcr.io/devcontainers/features/go:1": { - "version": "1.23.2" + "version": "1.24.1" }, "docker-in-docker": "latest", "kubectl-helm-minikube": "latest", From 26a491a2e0eeecf7533b2398e1a724f886e15849 Mon Sep 17 00:00:00 2001 From: Evan Baker Date: Thu, 4 Dec 2025 11:46:39 -0800 Subject: [PATCH 28/47] fix: set logger for controller-runtime (#4123) Signed-off-by: GitHub --- cns/service/main.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cns/service/main.go b/cns/service/main.go index d7b9a526d5..e980360cf6 100644 --- 
a/cns/service/main.go +++ b/cns/service/main.go @@ -86,6 +86,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/healthz" + ctrllog "sigs.k8s.io/controller-runtime/pkg/log" ctrlmgr "sigs.k8s.io/controller-runtime/pkg/manager" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" ) @@ -851,6 +852,9 @@ func main() { // Add APIServer FQDN to Log metadata logger.Log.SetAPIServer(os.Getenv("KUBERNETES_SERVICE_HOST")) + // set logger in ctrlruntime + ctrllog.SetLogger(zapr.NewLogger(z)) + // Check the CNI statefile mount, and if the file is empty // stub an empty JSON object if err := cnipodprovider.WriteObjectToCNIStatefile(); err != nil { //nolint:govet //shadow okay From 606ee06bc94a6c3d8441b40979223d9bdb02d36e Mon Sep 17 00:00:00 2001 From: karina-ranadive <77542199+karina-ranadive@users.noreply.github.com> Date: Thu, 4 Dec 2025 17:49:14 -0500 Subject: [PATCH 29/47] test: expand LRP test to include lifecycle events (#4086) * test: expand LRP test to include lifecycle events * fix * adjustments * fix * removed redundant basic lrp test * changed retry to 1 minute --------- Co-authored-by: Karina Ranadive --- test/integration/lrp/lrp_fqdn_test.go | 2 +- test/integration/lrp/lrp_test.go | 312 +++++++++++++++++- .../v1.13/cilium-config/cilium-config.yaml | 2 +- test/internal/kubernetes/utils.go | 18 + 4 files changed, 321 insertions(+), 13 deletions(-) diff --git a/test/integration/lrp/lrp_fqdn_test.go b/test/integration/lrp/lrp_fqdn_test.go index 93bca2439b..18ddc1caba 100644 --- a/test/integration/lrp/lrp_fqdn_test.go +++ b/test/integration/lrp/lrp_fqdn_test.go @@ -102,7 +102,7 @@ func TestLRPFQDN(t *testing.T) { for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { - testLRPCase(t, ctx, *selectedPod, tt.command, tt.expectedMsgContains, tt.expectedErrMsgContains, tt.shouldError, tt.countIncreases) + testLRPCase(t, ctx, *selectedPod, 
tt.command, tt.expectedMsgContains, tt.expectedErrMsgContains, tt.shouldError, tt.countIncreases, getPrometheusAddress(initialPrometheusPort)) }) } } diff --git a/test/integration/lrp/lrp_test.go b/test/integration/lrp/lrp_test.go index 59fd974114..9d473f9188 100644 --- a/test/integration/lrp/lrp_test.go +++ b/test/integration/lrp/lrp_test.go @@ -4,6 +4,7 @@ package lrp import ( "context" + "fmt" "os" "strings" "testing" @@ -13,11 +14,16 @@ import ( "github.com/Azure/azure-container-networking/test/integration/prometheus" "github.com/Azure/azure-container-networking/test/internal/kubernetes" "github.com/Azure/azure-container-networking/test/internal/retry" + ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" ciliumClientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" "github.com/pkg/errors" "github.com/stretchr/testify/require" "golang.org/x/exp/rand" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "sigs.k8s.io/yaml" ) const ( @@ -28,11 +34,13 @@ const ( dnsService = "kube-dns" retryAttempts = 10 retryDelay = 5 * time.Second - promAddress = "http://localhost:9253/metrics" nodeLocalDNSLabelSelector = "k8s-app=node-local-dns" clientLabelSelector = "lrp-test=true" coreDNSRequestCountTotal = "coredns_dns_request_count_total" clientContainer = "no-op" + // Port constants for prometheus endpoints + initialPrometheusPort = 9253 + recreatedPrometheusPort = 9254 ) var ( @@ -47,6 +55,11 @@ var ( clientPath = ciliumManifestsDir + "client-ds.yaml" ) +// getPrometheusAddress returns the prometheus metrics URL for the given port +func getPrometheusAddress(port int) string { + return fmt.Sprintf("http://localhost:%d/metrics", port) +} + func setupLRP(t *testing.T, ctx context.Context) (*corev1.Pod, func()) { var cleanUpFns []func() success := false @@ -132,8 +145,8 @@ func setupLRP(t *testing.T, ctx context.Context) (*corev1.Pod, func()) { pf, err := 
k8s.NewPortForwarder(config, k8s.PortForwardingOpts{ Namespace: nodeLocalDNSDS.Namespace, PodName: selectedLocalDNSPod, - LocalPort: 9253, - DestPort: 9253, + LocalPort: initialPrometheusPort, + DestPort: initialPrometheusPort, }) require.NoError(t, err) pctx := context.Background() @@ -154,7 +167,7 @@ func setupLRP(t *testing.T, ctx context.Context) (*corev1.Pod, func()) { } func testLRPCase(t *testing.T, ctx context.Context, clientPod corev1.Pod, clientCmd []string, expectResponse, expectErrMsg string, - shouldError, countShouldIncrease bool) { + shouldError, countShouldIncrease bool, prometheusAddress string) { config := kubernetes.MustGetRestConfig() cs := kubernetes.MustGetClientset() @@ -167,9 +180,11 @@ func testLRPCase(t *testing.T, ctx context.Context, clientPod corev1.Pod, client "zone": ".", } - // curl localhost:9253/metrics - beforeMetric, err := prometheus.GetMetric(promAddress, coreDNSRequestCountTotal, metricLabels) + // curl to the specified prometheus address + beforeMetric, err := prometheus.GetMetric(prometheusAddress, coreDNSRequestCountTotal, metricLabels) require.NoError(t, err) + beforeValue := beforeMetric.GetCounter().GetValue() + t.Logf("Before DNS request - metric count: %.0f", beforeValue) t.Log("calling command from client") @@ -187,13 +202,15 @@ func testLRPCase(t *testing.T, ctx context.Context, clientPod corev1.Pod, client time.Sleep(500 * time.Millisecond) // curl again and see count diff - afterMetric, err := prometheus.GetMetric(promAddress, coreDNSRequestCountTotal, metricLabels) + afterMetric, err := prometheus.GetMetric(prometheusAddress, coreDNSRequestCountTotal, metricLabels) require.NoError(t, err) + afterValue := afterMetric.GetCounter().GetValue() + t.Logf("After DNS request - metric count: %.0f (diff: %.0f)", afterValue, afterValue-beforeValue) if countShouldIncrease { - require.Greater(t, afterMetric.GetCounter().GetValue(), beforeMetric.GetCounter().GetValue(), "dns metric count did not increase after command") + 
require.Greater(t, afterValue, beforeValue, "dns metric count did not increase after command - before: %.0f, after: %.0f", beforeValue, afterValue) } else { - require.Equal(t, afterMetric.GetCounter().GetValue(), beforeMetric.GetCounter().GetValue(), "dns metric count increased after command") + require.Equal(t, afterValue, beforeValue, "dns metric count increased after command - before: %.0f, after: %.0f", beforeValue, afterValue) } } @@ -210,9 +227,282 @@ func TestLRP(t *testing.T) { defer cleanupFn() require.NotNil(t, selectedPod) + // Get the kube-dns service IP for DNS requests + cs := kubernetes.MustGetClientset() + svc, err := kubernetes.GetService(ctx, cs, kubeSystemNamespace, dnsService) + require.NoError(t, err) + kubeDNS := svc.Spec.ClusterIP + + t.Logf("LRP Test Starting...") + + // Basic LRP test - using initial port from setupLRP testLRPCase(t, ctx, *selectedPod, []string{ - "nslookup", "google.com", "10.0.0.10", - }, "", "", false, true) + "nslookup", "google.com", kubeDNS, + }, "", "", false, true, getPrometheusAddress(initialPrometheusPort)) + + t.Logf("LRP Test Completed") + + t.Logf("LRP Lifecycle Test Starting") + + // Run LRP Lifecycle test + testLRPLifecycle(t, ctx, *selectedPod, kubeDNS) + + t.Logf("LRP Lifecycle Test Completed") +} + +// testLRPLifecycle performs testing of Local Redirect Policy functionality +// including pod restarts, resource recreation, and cilium command validation +func testLRPLifecycle(t *testing.T, ctx context.Context, clientPod corev1.Pod, kubeDNS string) { + config := kubernetes.MustGetRestConfig() + cs := kubernetes.MustGetClientset() + + + // Step 1: Validate LRP using cilium commands + t.Log("Step 1: Validating LRP using cilium commands") + validateCiliumLRP(t, ctx, cs, config) + + // Step 2: Restart busybox pods and verify LRP still works + t.Log("Step 2: Restarting client pods to test persistence") + restartedPod := restartClientPodsAndGetPod(t, ctx, cs, clientPod) + + // Step 3: Verify metrics after restart + 
t.Log("Step 3: Verifying LRP functionality after pod restart") + testLRPCase(t, ctx, restartedPod, []string{ + "nslookup", "google.com", kubeDNS, + }, "", "", false, true, getPrometheusAddress(initialPrometheusPort)) + + // Step 4: Validate cilium commands still show LRP + t.Log("Step 4: Re-validating cilium LRP after restart") + validateCiliumLRP(t, ctx, cs, config) + + // Step 5: Delete and recreate resources & restart nodelocaldns daemonset + t.Log("Step 5: Testing resource deletion and recreation") + recreatedPod := deleteAndRecreateResources(t, ctx, cs, clientPod) + + // Step 6: Re-establish port forward to new node-local-dns pod and validate metrics + t.Log("Step 6: Re-establishing port forward to new node-local-dns pod for metrics validation") + + // Get the new node-local-dns pod on the same node as our recreated client pod + nodeName := recreatedPod.Spec.NodeName + newNodeLocalDNSPods, err := kubernetes.GetPodsByNode(ctx, cs, kubeSystemNamespace, nodeLocalDNSLabelSelector, nodeName) + require.NoError(t, err) + require.NotEmpty(t, newNodeLocalDNSPods.Items, "No node-local-dns pod found on node %s after restart", nodeName) + + newNodeLocalDNSPod := TakeOne(newNodeLocalDNSPods.Items) + t.Logf("Setting up port forward to new node-local-dns pod: %s", newNodeLocalDNSPod.Name) + + // Setup new port forward to the new node-local-dns pod + newPf, err := k8s.NewPortForwarder(config, k8s.PortForwardingOpts{ + Namespace: newNodeLocalDNSPod.Namespace, + PodName: newNodeLocalDNSPod.Name, + LocalPort: recreatedPrometheusPort, // Use different port to avoid conflicts + DestPort: initialPrometheusPort, + }) + require.NoError(t, err) + + newPortForwardCtx, newCancel := context.WithTimeout(ctx, (retryAttempts+1)*retryDelay) + defer newCancel() + + err = defaultRetrier.Do(newPortForwardCtx, func() error { + t.Logf("attempting port forward to new node-local-dns pod %s...", newNodeLocalDNSPod.Name) + return errors.Wrap(newPf.Forward(newPortForwardCtx), "could not start port 
forward to new pod") + }) + require.NoError(t, err, "could not start port forward to new node-local-dns pod") + defer newPf.Stop() + + t.Log("Port forward to new node-local-dns pod established") + + // Use testLRPCase function with the new prometheus address + t.Log("Validating metrics with new node-local-dns pod") + testLRPCase(t, ctx, recreatedPod, []string{ + "nslookup", "github.com", kubeDNS, + }, "", "", false, true, getPrometheusAddress(recreatedPrometheusPort)) + + t.Logf("SUCCESS: Metrics validation passed - traffic is being redirected to new node-local-dns pod %s", newNodeLocalDNSPod.Name) + + // Step 7: Final cilium validation after node-local-dns restart + t.Log("Step 7: Final cilium validation - ensuring LRP is still active after node-local-dns restart") + validateCiliumLRP(t, ctx, cs, config) + +} + +// validateCiliumLRP checks that LRP is properly configured in cilium +func validateCiliumLRP(t *testing.T, ctx context.Context, cs *k8sclient.Clientset, config *rest.Config) { + ciliumPods, err := cs.CoreV1().Pods(kubeSystemNamespace).List(ctx, metav1.ListOptions{ + LabelSelector: "k8s-app=cilium", + }) + require.NoError(t, err) + require.NotEmpty(t, ciliumPods.Items) + ciliumPod := TakeOne(ciliumPods.Items) + + // Get Kubernetes version to determine validation approach + serverVersion, err := cs.Discovery().ServerVersion() + require.NoError(t, err) + t.Logf("Detected Kubernetes version: %s", serverVersion.String()) + + // Get kube-dns service IP for validation + svc, err := kubernetes.GetService(ctx, cs, kubeSystemNamespace, dnsService) + require.NoError(t, err) + kubeDNSIP := svc.Spec.ClusterIP + + // IMPORTANT: Get node-local-dns pod IP on the SAME node as the cilium pod we're using + selectedNode := ciliumPod.Spec.NodeName + t.Logf("Using cilium pod %s on node %s for validation", ciliumPod.Name, selectedNode) + + // Get node-local-dns pod specifically on the same node as our cilium pod + nodeLocalDNSPods, err := kubernetes.GetPodsByNode(ctx, cs, 
kubeSystemNamespace, nodeLocalDNSLabelSelector, selectedNode) + require.NoError(t, err) + require.NotEmpty(t, nodeLocalDNSPods.Items, "No node-local-dns pod found on node %s", selectedNode) + + // Use the first (and should be only) node-local-dns pod on this node + nodeLocalDNSPod := nodeLocalDNSPods.Items[0] + nodeLocalDNSIP := nodeLocalDNSPod.Status.PodIP + require.NotEmpty(t, nodeLocalDNSIP, "node-local-dns pod %s has no IP address", nodeLocalDNSPod.Name) + + t.Logf("Validating LRP: kubeDNS IP=%s, nodeLocalDNS IP=%s (pod: %s), node=%s", + kubeDNSIP, nodeLocalDNSIP, nodeLocalDNSPod.Name, selectedNode) + + // Check cilium lrp list + lrpListCmd := []string{"cilium", "lrp", "list"} + lrpOutput, _, err := kubernetes.ExecCmdOnPod(ctx, cs, ciliumPod.Namespace, ciliumPod.Name, "cilium-agent", lrpListCmd, config, false) + require.NoError(t, err) + + // Validate the LRP output structure more thoroughly + lrpOutputStr := string(lrpOutput) + require.Contains(t, lrpOutputStr, "nodelocaldns", "LRP not found in cilium lrp list") + + // Parse LRP list output to validate structure + lrpLines := strings.Split(lrpOutputStr, "\n") + nodelocaldnsFound := false + + for _, line := range lrpLines { + line = strings.TrimSpace(line) + if strings.Contains(line, "nodelocaldns") && strings.Contains(line, "kube-system") { + // Validate that the line contains expected components + require.Contains(t, line, "kube-dns", "LRP line should reference kube-dns service") + nodelocaldnsFound = true + t.Logf("Found nodelocaldns LRP entry: %s", line) + break + } + } + + require.True(t, nodelocaldnsFound, "nodelocaldns LRP entry not found with expected structure in output: %s", lrpOutputStr) + + // Check cilium service list for localredirect + serviceListCmd := []string{"cilium", "service", "list"} + serviceOutput, _, err := kubernetes.ExecCmdOnPod(ctx, cs, ciliumPod.Namespace, ciliumPod.Name, "cilium-agent", serviceListCmd, config, false) + require.NoError(t, err) + require.Contains(t, 
string(serviceOutput), "LocalRedirect", "LocalRedirect not found in cilium service list") + + // Validate LocalRedirect entries + serviceLines := strings.Split(string(serviceOutput), "\n") + tcpFound := false + udpFound := false + legacyFound := false + + for _, line := range serviceLines { + if strings.Contains(line, "LocalRedirect") && strings.Contains(line, kubeDNSIP) { + // Check if this line contains the expected frontend (kube-dns) and backend (node-local-dns) IPs + if strings.Contains(line, nodeLocalDNSIP) { + // Check for both modern format (with /TCP or /UDP) and legacy format (without protocol) + if strings.Contains(line, "/TCP") { + tcpFound = true + t.Logf("Found TCP LocalRedirect: %s", strings.TrimSpace(line)) + } else if strings.Contains(line, "/UDP") { + udpFound = true + t.Logf("Found UDP LocalRedirect: %s", strings.TrimSpace(line)) + } else { + legacyFound = true + t.Logf("Found legacy LocalRedirect: %s", strings.TrimSpace(line)) + } + } + } + } + + // Validate that we found either legacy format or modern format entries + t.Log("Validating LocalRedirect entries - accepting either legacy format or modern TCP/UDP format") + require.True(t, legacyFound || (tcpFound && udpFound), "Either legacy LocalRedirect entry OR both TCP and UDP entries must be found with frontend IP %s and backend IP %s on node %s", kubeDNSIP, nodeLocalDNSIP, selectedNode) + + t.Logf("Cilium LRP List Output:\n%s", string(lrpOutput)) + t.Logf("Cilium Service List Output:\n%s", string(serviceOutput)) +} + +// restartClientPodsAndGetPod restarts the client daemonset and returns a new pod reference +func restartClientPodsAndGetPod(t *testing.T, ctx context.Context, cs *k8sclient.Clientset, originalPod corev1.Pod) corev1.Pod { + // Get the node name for consistent testing + nodeName := originalPod.Spec.NodeName + + // Restart the daemonset (assumes it's named "lrp-test" based on the manifest) + err := kubernetes.MustRestartDaemonset(ctx, cs, originalPod.Namespace, "lrp-test") + 
require.NoError(t, err) + + // Wait for the daemonset to be ready + kubernetes.WaitForPodDaemonset(ctx, cs, originalPod.Namespace, "lrp-test", clientLabelSelector) + + // Get the new pod on the same node + clientPods, err := kubernetes.GetPodsByNode(ctx, cs, originalPod.Namespace, clientLabelSelector, nodeName) + require.NoError(t, err) + require.NotEmpty(t, clientPods.Items) + + return TakeOne(clientPods.Items) +} + +// deleteAndRecreateResources deletes and recreates client pods and LRP, returning new pod +func deleteAndRecreateResources(t *testing.T, ctx context.Context, cs *k8sclient.Clientset, originalPod corev1.Pod) corev1.Pod { + config := kubernetes.MustGetRestConfig() + ciliumCS, err := ciliumClientset.NewForConfig(config) + require.NoError(t, err) + + nodeName := originalPod.Spec.NodeName + + // Delete client daemonset + dsClient := cs.AppsV1().DaemonSets(originalPod.Namespace) + clientDS := kubernetes.MustParseDaemonSet(clientPath) + kubernetes.MustDeleteDaemonset(ctx, dsClient, clientDS) + + // Delete LRP + lrpContent, err := os.ReadFile(lrpPath) + require.NoError(t, err) + var lrp ciliumv2.CiliumLocalRedirectPolicy + err = yaml.Unmarshal(lrpContent, &lrp) + require.NoError(t, err) + + lrpClient := ciliumCS.CiliumV2().CiliumLocalRedirectPolicies(lrp.Namespace) + kubernetes.MustDeleteCiliumLocalRedirectPolicy(ctx, lrpClient, lrp) + + // Wait for client pods to be deleted + t.Log("Waiting for client pods to be deleted...") + err = kubernetes.WaitForPodsDelete(ctx, cs, originalPod.Namespace, clientLabelSelector) + require.NoError(t, err) + + // Wait for LRP to be deleted by polling + t.Log("Waiting for LRP to be deleted...") + err = kubernetes.WaitForLRPDelete(ctx, ciliumCS, lrp) + require.NoError(t, err) + + // Recreate LRP + _, cleanupLRP := kubernetes.MustSetupLRP(ctx, ciliumCS, lrpPath) + t.Cleanup(cleanupLRP) + + // Restart node-local-dns pods to pick up new LRP configuration + t.Log("Restarting node-local-dns pods after LRP recreation") + err = 
kubernetes.MustRestartDaemonset(ctx, cs, kubeSystemNamespace, "node-local-dns") + require.NoError(t, err) + kubernetes.WaitForPodDaemonset(ctx, cs, kubeSystemNamespace, "node-local-dns", nodeLocalDNSLabelSelector) + + // Recreate client daemonset + _, cleanupClient := kubernetes.MustSetupDaemonset(ctx, cs, clientPath) + t.Cleanup(cleanupClient) + + // Wait for pods to be ready + kubernetes.WaitForPodDaemonset(ctx, cs, clientDS.Namespace, clientDS.Name, clientLabelSelector) + + // Get new pod on the same node + clientPods, err := kubernetes.GetPodsByNode(ctx, cs, clientDS.Namespace, clientLabelSelector, nodeName) + require.NoError(t, err) + require.NotEmpty(t, clientPods.Items) + + return TakeOne(clientPods.Items) } // TakeOne takes one item from the slice randomly; if empty, it returns the empty value for the type diff --git a/test/integration/manifests/cilium/v1.13/cilium-config/cilium-config.yaml b/test/integration/manifests/cilium/v1.13/cilium-config/cilium-config.yaml index 198074750b..add4b386b7 100644 --- a/test/integration/manifests/cilium/v1.13/cilium-config/cilium-config.yaml +++ b/test/integration/manifests/cilium/v1.13/cilium-config/cilium-config.yaml @@ -34,7 +34,7 @@ data: enable-l2-neigh-discovery: "true" enable-l7-proxy: "false" enable-local-node-route: "false" - enable-local-redirect-policy: "true" # set to true for lrp test + enable-local-redirect-policy: "false" enable-metrics: "true" enable-policy: default enable-remote-node-identity: "true" diff --git a/test/internal/kubernetes/utils.go b/test/internal/kubernetes/utils.go index ac35efeada..bcaadcb3fd 100644 --- a/test/internal/kubernetes/utils.go +++ b/test/internal/kubernetes/utils.go @@ -365,6 +365,24 @@ func WaitForDeploymentToDelete(ctx context.Context, deploymentsClient typedappsv return errors.Wrapf(retrier.Do(ctx, assertDeploymentNotFound), "could not assert deployment %s isNotFound", d.Name) } +func WaitForLRPDelete(ctx context.Context, ciliumClientset *cilium.Clientset, lrp 
ciliumv2.CiliumLocalRedirectPolicy) error { + lrpClient := ciliumClientset.CiliumV2().CiliumLocalRedirectPolicies(lrp.Namespace) + + checkLRPDeleted := func() error { + _, err := lrpClient.Get(ctx, lrp.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + return nil + } + if err != nil { + return errors.Wrapf(err, "could not get LRP %s", lrp.Name) + } + return errors.Errorf("LRP %s still present", lrp.Name) + } + + retrier := retry.Retrier{Attempts: DeleteRetryAttempts, Delay: DeleteRetryDelay} + return errors.Wrap(retrier.Do(ctx, checkLRPDeleted), "failed to wait for LRP to delete") +} + func WaitForPodDaemonset(ctx context.Context, clientset *kubernetes.Clientset, namespace, daemonsetName, podLabelSelector string) error { podsClient := clientset.CoreV1().Pods(namespace) daemonsetClient := clientset.AppsV1().DaemonSets(namespace) From 5d75e2630f2b902fa9587cdeecb6ed2a344b238f Mon Sep 17 00:00:00 2001 From: Keith Nguyen Date: Thu, 4 Dec 2025 22:33:40 -0500 Subject: [PATCH 30/47] feat: add support for windows MAC hex dump (#4122) * feat: add support for windows MAC hex dump * test: invalid MAC address length * fix: typo * fix: comment --------- Co-authored-by: Keith Nguyen --- cns/imds/client.go | 48 ++++++++++++++++++++++++++++++----------- cns/imds/client_test.go | 33 +++++++++++++++++++++++++++- 2 files changed, 68 insertions(+), 13 deletions(-) diff --git a/cns/imds/client.go b/cns/imds/client.go index ac06e6d8a3..57ec60303d 100644 --- a/cns/imds/client.go +++ b/cns/imds/client.go @@ -9,6 +9,7 @@ import ( "net" "net/http" "net/url" + "strings" "github.com/avast/retry-go/v4" "github.com/pkg/errors" @@ -45,17 +46,18 @@ func RetryAttempts(attempts uint) ClientOption { } const ( - vmUniqueIDProperty = "vmId" - imdsComputePath = "/metadata/instance/compute" - imdsNetworkPath = "/metadata/instance/network" - imdsVersionsPath = "/metadata/versions" - imdsDefaultAPIVersion = "api-version=2021-01-01" - imdsNCDetailsVersion = "api-version=2025-07-24" - 
imdsFormatJSON = "format=json" - metadataHeaderKey = "Metadata" - metadataHeaderValue = "true" - defaultRetryAttempts = 3 - defaultIMDSEndpoint = "http://169.254.169.254" + vmUniqueIDProperty = "vmId" + imdsComputePath = "/metadata/instance/compute" + imdsNetworkPath = "/metadata/instance/network" + imdsVersionsPath = "/metadata/versions" + imdsDefaultAPIVersion = "api-version=2021-01-01" + imdsNCDetailsVersion = "api-version=2025-07-24" + imdsMACAddressStringLength = 12 // 6 bytes in hex equals 12 characters + imdsFormatJSON = "format=json" + metadataHeaderKey = "Metadata" + metadataHeaderValue = "true" + defaultRetryAttempts = 3 + defaultIMDSEndpoint = "http://169.254.169.254" ) var ( @@ -218,7 +220,8 @@ func (h *HardwareAddr) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s); err != nil { return errors.Wrap(err, "failed to unmarshal JSON data") } - mac, err := net.ParseMAC(s) + + mac, err := parseMacAddress(s) if err != nil { return errors.Wrap(err, "failed to parse MAC address") } @@ -226,6 +229,27 @@ func (h *HardwareAddr) UnmarshalJSON(data []byte) error { return nil } +// parseMacAddress is a wrapper around net.ParseMAC to handle Windows MAC address. Windows MAC address is a pure hex +// dump without delimiter, so we need to add delimiters. This happens when CNS gets MAC address from IMDS. 
+func parseMacAddress(s string) (net.HardwareAddr, error) { + if !strings.ContainsAny(s, ":-.") && len(s) == imdsMACAddressStringLength { + var sb strings.Builder + for i := 0; i < len(s); i += 2 { + if i > 0 { + sb.WriteByte(':') + } + sb.WriteString(s[i : i+2]) + } + s = sb.String() + } + + mac, err := net.ParseMAC(s) + if err != nil { + return nil, errors.Wrap(err, "failed to parse MAC address") + } + return mac, nil +} + func (h *HardwareAddr) String() string { return net.HardwareAddr(*h).String() } diff --git a/cns/imds/client_test.go b/cns/imds/client_test.go index ac97ba5251..263a5276f0 100644 --- a/cns/imds/client_test.go +++ b/cns/imds/client_test.go @@ -111,7 +111,7 @@ func TestGetNetworkInterfaces(t *testing.T) { }, { "interfaceCompartmentID": "", - "macAddress": "00:00:5e:00:53:02" + "macAddress": "00005e005302" } ] }`) @@ -160,6 +160,37 @@ func TestGetNetworkInterfaces(t *testing.T) { assert.NotEqual(t, firstMAC.String(), secondMAC.String(), "MAC addresses should be different") } +func TestGetNetworkInterfacesInvalidMAC(t *testing.T) { + networkInterfaces := []byte(`{ + "interface": [ + { + "interfaceCompartmentID": "nc-12345-67890", + "macAddress": "00005e00530" // incorrect windows MAC address length + }, + ] + }`) + + mockIMDSServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + metadataHeader := r.Header.Get("Metadata") + assert.Equal(t, "true", metadataHeader) + + assert.Contains(t, r.URL.Path, "/metadata/instance/network") + + w.WriteHeader(http.StatusOK) + _, writeErr := w.Write(networkInterfaces) + if writeErr != nil { + t.Errorf("error writing response: %v", writeErr) + return + } + })) + defer mockIMDSServer.Close() + + imdsClient := imds.NewClient(imds.Endpoint(mockIMDSServer.URL)) + interfaces, err := imdsClient.GetNetworkInterfaces(context.Background()) + require.Error(t, err, "expected error for invalid MAC address") + require.Nil(t, interfaces, "expected nil interfaces on error") +} + func 
TestGetNetworkInterfacesInvalidEndpoint(t *testing.T) { imdsClient := imds.NewClient(imds.Endpoint(string([]byte{0x7f})), imds.RetryAttempts(1)) _, err := imdsClient.GetNetworkInterfaces(context.Background()) From 854149b25e6cbe2c1eb75230138c975b472ae06f Mon Sep 17 00:00:00 2001 From: Jackie Luc <15662837+jackieluc@users.noreply.github.com> Date: Thu, 4 Dec 2025 20:10:16 -0800 Subject: [PATCH 31/47] chore: add swiftv2 windows conflist (#4144) * add swiftv2 windows conflist * lint: spaces * feat: add disableAsyncDelete config * fix: follow CNI spec with plugins property --- cni/azure-windows-swiftv2-stateless.conflist | 27 ++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 cni/azure-windows-swiftv2-stateless.conflist diff --git a/cni/azure-windows-swiftv2-stateless.conflist b/cni/azure-windows-swiftv2-stateless.conflist new file mode 100644 index 0000000000..5463abd647 --- /dev/null +++ b/cni/azure-windows-swiftv2-stateless.conflist @@ -0,0 +1,27 @@ +{ + "cniVersion": "1.0.0", + "name": "azure", + "plugins": [ + { + "type": "azure-vnet", + "mode": "bridge", + "bridge": "azure0", + "capabilities": { + "portMappings": true, + "dns": true + }, + "ipam": { + "type": "azure-cns" + }, + "dns": { + "Nameservers": [ + "168.63.129.16" + ], + "Search": [ + "svc.cluster.local" + ] + }, + "disableAsyncDelete": true + } + ] +} \ No newline at end of file From 7a87ed97d65873adef25132b0f4a72261951f98d Mon Sep 17 00:00:00 2001 From: Sharif Nasser Kadamani Date: Wed, 10 Dec 2025 14:57:58 -0800 Subject: [PATCH 32/47] ci: add AKS Swiftv2 Manifold E2E in ACN pipeline (#4128) * add AKS Swiftv2 Manifold E2E in ACN pipeline * add AKS Swiftv2 Singularity E2E to ACN PR pipeline schedule * remove test schedule * limit run to one region * use master branch * neat code --- .../swiftv2-manifold-e2e.stages.yaml | 35 +++++++++++++++++++ .pipelines/pipeline.yaml | 7 ++++ .pipelines/run-pipeline.yaml | 7 +++- 3 files changed, 48 insertions(+), 1 deletion(-) create mode 100644 
.pipelines/multitenancy/swiftv2-manifold-e2e.stages.yaml diff --git a/.pipelines/multitenancy/swiftv2-manifold-e2e.stages.yaml b/.pipelines/multitenancy/swiftv2-manifold-e2e.stages.yaml new file mode 100644 index 0000000000..674e5aa0b1 --- /dev/null +++ b/.pipelines/multitenancy/swiftv2-manifold-e2e.stages.yaml @@ -0,0 +1,35 @@ +parameters: + name: "" + dependsOn: "" + +stages: + - stage: manifold_e2e + displayName: E2E - AKS Swiftv2 Manifold + variables: + TAG: $[ stageDependencies.setup.env.outputs['EnvironmentalVariables.Tag'] ] + IMAGE_REPO_PATH: $[ format('{0}/', stageDependencies.setup.env.outputs['EnvironmentalVariables.imageRepositoryPath']) ] + ${{ if eq(parameters.dependsOn, 'publish') }}: + IMAGE_REPO_PATH_REF: 'azure-' + ${{ else }}: + IMAGE_REPO_PATH_REF: $(IMAGE_REPO_PATH) + dependsOn: + - ${{ parameters.dependsOn }} + - setup + jobs: + - job: ${{ parameters.name }} + displayName: AKS Swiftv2 Multitenancy Manifold E2E Test Suite - (${{ parameters.name }}) + timeoutInMinutes: 210 + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + isCustom: true + type: linux + steps: + - task: TriggerBuild@3 + inputs: + buildDefinition: '391699' + templateParameters: 'regions: ["westus2"], useAcnPublic: true, cnscniversion: $(TAG), cnscniversionwindows: $(TAG), cnscniImagePrefix: $(IMAGE_REPO_PATH_REF)' + useSameBranch: false + queueBuildForUserThatTriggeredBuild: true + branchToUse: 'refs/heads/master' + waitForQueuedBuildsToFinish: true + authenticationMethod: 'OAuth Token' \ No newline at end of file diff --git a/.pipelines/pipeline.yaml b/.pipelines/pipeline.yaml index a249d127ca..cd8a3bc936 100644 --- a/.pipelines/pipeline.yaml +++ b/.pipelines/pipeline.yaml @@ -488,6 +488,13 @@ stages: dependsOn: ["test"] scaleup: 50 + - ${{ if eq(variables['Build.Reason'], 'Schedule') }}: + # AKS Swiftv2 Manifold E2E tests + - template: multitenancy/swiftv2-manifold-e2e.stages.yaml + parameters: + name: "swiftv2_manifold_e2e" + dependsOn: publish + - stage: delete displayName: Delete 
Clusters condition: always() diff --git a/.pipelines/run-pipeline.yaml b/.pipelines/run-pipeline.yaml index 6ad478ef86..0ae566a47b 100644 --- a/.pipelines/run-pipeline.yaml +++ b/.pipelines/run-pipeline.yaml @@ -479,7 +479,12 @@ stages: vmSize: Standard_B2ms dependsOn: manifests scaleup: 50 - + + # AKS Swiftv2 Manifold E2E tests + - template: multitenancy/swiftv2-manifold-e2e.stages.yaml + parameters: + name: "swiftv2_manifold_e2e" + dependsOn: manifests - stage: delete displayName: Delete Clusters From 2e853374d220f1d5eeec247a434bd5b79dd78924 Mon Sep 17 00:00:00 2001 From: Isaiah Raya Date: Fri, 12 Dec 2025 09:29:46 -0800 Subject: [PATCH 33/47] =?UTF-8?q?forwardport:=20[NPM]=20[Vulnerability]=20?= =?UTF-8?q?Resolve=20stdlib=20CVEs=20by=20Updating=20go=20Version=20from?= =?UTF-8?q?=E2=80=A6=20(#4166)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [NPM] [Vulnerability] Resolve stdlib CVEs by Updating go Version from 1.23 -> 1.25.5 and Update Base Windows Image (#4153) * update go version to resolve stdlib cves in npm * bump go to 1.25.5 * updated windows base image to resolve cves * Revert "[NPM] [Backport] Remove NPM Windows 2022 Tests from the NPM Conformance Tests + Clean Up NPM release/v1.4 (#3917)" This reverts commit c0e7aeb23bf7161ed116bdeb22032af1aa616b18. * Partial revert: restore NPM manifests, keep pipeline test changes * revert windows conformance tests --- npm/linux.Dockerfile | 4 ++-- npm/windows.Dockerfile | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/npm/linux.Dockerfile b/npm/linux.Dockerfile index bc9fd5d2bd..71ae469db1 100644 --- a/npm/linux.Dockerfile +++ b/npm/linux.Dockerfile @@ -1,10 +1,10 @@ -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24-azurelinux3.0 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.25.5 AS builder ARG VERSION ARG NPM_AI_PATH ARG NPM_AI_ID WORKDIR /usr/local/src COPY . . 
-RUN CGO_ENABLED=0 go build -v -o /usr/local/bin/azure-npm -ldflags "-s -w -X main.version="$VERSION" -X "$NPM_AI_PATH"="$NPM_AI_ID"" -gcflags="-dwarflocationlists=true" npm/cmd/*.go +RUN MS_GO_NOSYSTEMCRYPTO=1 CGO_ENABLED=0 go build -v -o /usr/local/bin/azure-npm -ldflags "-s -w -X main.version="$VERSION" -X "$NPM_AI_PATH"="$NPM_AI_ID"" -gcflags="-dwarflocationlists=true" npm/cmd/*.go FROM mcr.microsoft.com/mirror/docker/library/ubuntu:24.04 as linux COPY --from=builder /usr/local/bin/azure-npm /usr/bin/azure-npm diff --git a/npm/windows.Dockerfile b/npm/windows.Dockerfile index dab7b6017b..cd31372bcb 100644 --- a/npm/windows.Dockerfile +++ b/npm/windows.Dockerfile @@ -1,14 +1,14 @@ ARG OS_VERSION -FROM --platform=linux/amd64 mcr.microsoft.com/oss/go/microsoft/golang:1.24-azurelinux3.0 AS builder +FROM --platform=linux/amd64 mcr.microsoft.com/oss/go/microsoft/golang:1.25.5 AS builder ARG VERSION ARG NPM_AI_PATH ARG NPM_AI_ID WORKDIR /usr/local/src COPY . . -RUN GOOS=windows CGO_ENABLED=0 go build -v -o /usr/local/bin/azure-npm.exe -ldflags "-s -w -X main.version="$VERSION" -X "$NPM_AI_PATH"="$NPM_AI_ID"" -gcflags="-dwarflocationlists=true" npm/cmd/*.go +RUN MS_GO_NOSYSTEMCRYPTO=1 GOOS=windows CGO_ENABLED=0 go build -v -o /usr/local/bin/azure-npm.exe -ldflags "-s -w -X main.version="$VERSION" -X "$NPM_AI_PATH"="$NPM_AI_ID"" -gcflags="-dwarflocationlists=true" npm/cmd/*.go # intermediate for win-ltsc2022 -FROM mcr.microsoft.com/windows/servercore@sha256:45952938708fbde6ec0b5b94de68bcdec3f8c838be018536b1e9e5bd95e6b943 as windows +FROM mcr.microsoft.com/windows/servercore@sha256:3a2a2fdfbae2f720f6fe26f2d7680146712ce330f605b02a61d624889735c72e as windows COPY --from=builder /usr/local/src/npm/examples/windows/kubeconfigtemplate.yaml kubeconfigtemplate.yaml COPY --from=builder /usr/local/src/npm/examples/windows/setkubeconfigpath.ps1 setkubeconfigpath.ps1 COPY --from=builder /usr/local/src/npm/examples/windows/setkubeconfigpath-capz.ps1 setkubeconfigpath-capz.ps1 From 
d7cd197071e7886b516b404c7b4c1f1f7079f7f8 Mon Sep 17 00:00:00 2001 From: Alexander <39818795+QxBytes@users.noreply.github.com> Date: Fri, 12 Dec 2025 09:53:29 -0800 Subject: [PATCH 34/47] ci: re-enable port forward tests on windows (#4164) re-enable port forward tests on windows --- .pipelines/singletenancy/aks-swift/e2e-job-template.yaml | 2 +- .pipelines/singletenancy/aks-swift/e2e.stages.yaml | 2 +- .pipelines/singletenancy/aks/e2e-job-template.yaml | 2 +- .pipelines/singletenancy/aks/e2e.stages.yaml | 2 +- .../azure-cni-overlay-stateless-e2e-job-template.yaml | 2 +- .../azure-cni-overlay-stateless-e2e.stages.yaml | 2 +- .../azure-cni-overlay/azure-cni-overlay-e2e-job-template.yaml | 2 +- .../azure-cni-overlay/azure-cni-overlay-e2e.stages.yaml | 2 +- .../dualstack-overlay/dualstackoverlay-e2e-job-template.yaml | 2 +- .../dualstack-overlay/dualstackoverlay-e2e.stages.yaml | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.pipelines/singletenancy/aks-swift/e2e-job-template.yaml b/.pipelines/singletenancy/aks-swift/e2e-job-template.yaml index d3b754b1bc..64b612da45 100644 --- a/.pipelines/singletenancy/aks-swift/e2e-job-template.yaml +++ b/.pipelines/singletenancy/aks-swift/e2e-job-template.yaml @@ -71,7 +71,7 @@ stages: dependsOn: ${{ parameters.name }} datapath: true dns: true - portforward: ${{ eq(parameters.os, 'linux') }} # Unblock Pipeline, broken for all windows scenarios + portforward: true hostport: true service: true diff --git a/.pipelines/singletenancy/aks-swift/e2e.stages.yaml b/.pipelines/singletenancy/aks-swift/e2e.stages.yaml index ed9c149342..714a559771 100644 --- a/.pipelines/singletenancy/aks-swift/e2e.stages.yaml +++ b/.pipelines/singletenancy/aks-swift/e2e.stages.yaml @@ -76,7 +76,7 @@ stages: dependsOn: ${{ parameters.name }} datapath: true dns: true - portforward: ${{ eq(parameters.os, 'linux') }} # Unblock Pipeline, broken for all windows scenarios + portforward: true hostport: true service: true diff --git 
a/.pipelines/singletenancy/aks/e2e-job-template.yaml b/.pipelines/singletenancy/aks/e2e-job-template.yaml index d7de01afcb..5a8c3c28b5 100644 --- a/.pipelines/singletenancy/aks/e2e-job-template.yaml +++ b/.pipelines/singletenancy/aks/e2e-job-template.yaml @@ -74,7 +74,7 @@ stages: os: ${{ parameters.os }} datapath: true dns: true - portforward: ${{ eq(parameters.os, 'linux') }} # Unblock Pipeline, broken for all windows scenarios + portforward: true hybridWin: true service: true hostport: true diff --git a/.pipelines/singletenancy/aks/e2e.stages.yaml b/.pipelines/singletenancy/aks/e2e.stages.yaml index 7f5d4e2a59..885e1d1043 100644 --- a/.pipelines/singletenancy/aks/e2e.stages.yaml +++ b/.pipelines/singletenancy/aks/e2e.stages.yaml @@ -80,7 +80,7 @@ stages: os: ${{ parameters.os }} datapath: true dns: true - portforward: ${{ eq(parameters.os, 'linux') }} # Unblock Pipeline, broken for all windows scenarios + portforward: true hybridWin: true service: true hostport: true diff --git a/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e-job-template.yaml b/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e-job-template.yaml index ca44ef3129..3c955293d3 100644 --- a/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e-job-template.yaml +++ b/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e-job-template.yaml @@ -72,7 +72,7 @@ stages: dependsOn: ${{ parameters.name }}_windows datapath: true dns: true - portforward: false # Unblock Pipeline, as stateless is tested in windows, broken for all windows scenarios + portforward: true hostport: true service: true hybridWin: true diff --git a/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e.stages.yaml b/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e.stages.yaml index 9c9eecda31..2a2c69f1fb 100644 --- 
a/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e.stages.yaml +++ b/.pipelines/singletenancy/azure-cni-overlay-stateless/azure-cni-overlay-stateless-e2e.stages.yaml @@ -78,7 +78,7 @@ stages: dependsOn: ${{ parameters.name }}_windows datapath: true dns: true - portforward: false # Unblock Pipeline, as stateless is tested in windows, broken for all windows scenarios + portforward: true hostport: true service: true hybridWin: true diff --git a/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-job-template.yaml b/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-job-template.yaml index 5208cb98c8..62b1d0a6fa 100644 --- a/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-job-template.yaml +++ b/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e-job-template.yaml @@ -71,7 +71,7 @@ stages: dependsOn: ${{ parameters.name }}_${{ parameters.os }} datapath: true dns: true - portforward: ${{ eq(parameters.os, 'linux') }} # Unblock Pipeline, broken for all windows scenarios + portforward: true hostport: true service: true hybridWin: ${{ eq(parameters.os, 'windows') }} diff --git a/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.stages.yaml b/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.stages.yaml index ed4a46bc84..bbce0bac1f 100644 --- a/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.stages.yaml +++ b/.pipelines/singletenancy/azure-cni-overlay/azure-cni-overlay-e2e.stages.yaml @@ -77,7 +77,7 @@ stages: dependsOn: ${{ parameters.name }}_${{ parameters.os }} datapath: true dns: true - portforward: ${{ eq(parameters.os, 'linux') }} # Unblock Pipeline, broken for all windows scenarios + portforward: true hostport: true service: true hybridWin: ${{ eq(parameters.os, 'windows') }} diff --git a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml 
b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml index 5fd8dae123..5302998d16 100644 --- a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml +++ b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml @@ -71,7 +71,7 @@ stages: dependsOn: ${{ parameters.name }}_${{ parameters.os }} dualstack: ${{ eq(parameters.os, 'linux') }} # RUN IN LINUX not WINDOWS Currently broken for scenario and blocking releases, HNS is investigating. Covered by go test in E2E step template dns: true - portforward: ${{ eq(parameters.os, 'linux') }} # RUN IN LINUX NOT WINDOWS Currently broken for scenario and blocking releases + portforward: true service: ${{ eq(parameters.os, 'linux') }} # RUN IN LINUX NOT WINDOWS Currently broken for scenario and blocking releases, HNS is investigating. hostport: true hybridWin: ${{ eq(parameters.os, 'windows') }} diff --git a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e.stages.yaml b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e.stages.yaml index 123f3d572d..4b368ba77e 100644 --- a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e.stages.yaml +++ b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e.stages.yaml @@ -76,7 +76,7 @@ stages: dependsOn: ${{ parameters.name }}_${{ parameters.os }} dualstack: ${{ eq(parameters.os, 'linux') }} # RUN IN LINUX not WINDOWS Currently broken for scenario and blocking releases, HNS is investigating. Covered by go test in E2E step template dns: true - portforward: ${{ eq(parameters.os, 'linux') }} # RUN IN LINUX NOT WINDOWS Currently broken for scenario and blocking releases + portforward: true service: ${{ eq(parameters.os, 'linux') }} # RUN IN LINUX NOT WINDOWS Currently broken for scenario and blocking releases, HNS is investigating. 
hostport: true hybridWin: ${{ eq(parameters.os, 'windows') }} From 38bd8d3aab03213f67020772cb93e7907310ef9c Mon Sep 17 00:00:00 2001 From: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> Date: Wed, 17 Dec 2025 18:05:28 -0800 Subject: [PATCH 35/47] Datapath tests for Long running clusters. (#4142) * Add SwiftV2 long-running pipeline with scheduled tests - Implemented scheduled pipeline running every 1 hour with persistent infrastructure - Split test execution into 2 jobs: Create (with 20min wait) and Delete - Added 8 test scenarios across 2 AKS clusters, 4 VNets, different subnets - Implemented two-phase deletion strategy to prevent PNI ReservationInUse errors - Added context timeouts on kubectl commands with force delete fallbacks - Resource naming uses RG name as BUILD_ID for uniqueness across parallel setups - Added SkipAutoDeleteTill tags to prevent automatic resource cleanup - Conditional setup stages controlled by runSetupStages parameter - Auto-generate RG name from location or allow custom names for parallel setups - Added comprehensive README with setup instructions and troubleshooting - Node selection by agentpool labels with usage tracking to prevent conflicts - Kubernetes naming compliance (RFC 1123) for all resources fix ginkgo flag. Add datapath tests. Delete old test file. Add testcases for private endpoint. Ginkgo run specs only on specified files. update pipeline params. Add ginkgo tags Add datapath tests. Add ginkgo build tags. remove wait time. set namespace. update pod image. Add more nsg rules to block subnets s1 and s2 test change. Change delegated subnet address range. Use delegated interface for network connectivity tests. Datapath test between clusters. test. test private endpoints. fix private endpoint tests. Set storage account names in output var. set storage account name. fix pn names. update pe update pe test. update sas token generation. Add node labels for sw2 scenario, cleanup pods on any test failure.
enable nsg tests. update storage. Add rules to nsg. disable private endpoint negative test. disable public network access on storage account with private endpoint. wait for default nsg to be created. disable negative test on private endpoint. private endpoint depends on aks cluster vnets, change pipeline job dependencies. Add node labels for each workload type and nic capacity. make sku constant. Update readme, set schedule for long running cluster on test branch. * Update readme file. * fix syntax for pe test. * Create NSG rules with unique priority. * Update go.mod Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> * Update test/integration/swiftv2/longRunningCluster/datapath_create_test.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> * Update test/integration/swiftv2/longRunningCluster/datapath_delete_test.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> * Update test/integration/swiftv2/longRunningCluster/datapath_connectivity_test.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> * Update test/integration/swiftv2/longRunningCluster/datapath_delete_test.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> * Error handling for private endpoint tests. * Private endpoint tests. * update private endpoint test. * update pod.yaml * Check if mtpnc is cleaned up after pods are deleted. * Update vnet names. * add container readiness check. * update pod.yaml * Update pod.yaml * Update connectivity test. * Update netcat curl test. 
* Enable delete pods. * Remove test changes. * remove test changes for storage accounts. * update go.mod * Make dockerfiles. * lint fixes * update dockerfiles. * Lint fix. * reset package name. * fix package name. * refactor: clean up long-running pipeline and update tests - Remove service connection from pipeline parameters - Update tests to use ginkgo v1 - Replace ginkgo CLI with go test - Remove fixed sleep timers - Add MTPNC and pod status verification in test code - Remove skip delete tags - Clean up long-running pipeline template * Assign and remove rbac role after every run. Set pipeline vars for delegator app. Replace fixed and infinite sleeps with bounded retry loops Optimize kubeconfig management by fetching once and reusing across jobs add retry for Private endpoint ip to be available. Remove unnecessary validation. cleanup. change kubeconfig paths. Set kubeconfig. * Make dockerfiles * fetch go.sum from master branch. * Check if rbac roles are cleaned up after delete. * TCP netcat connectivity test improvements. Fetch storage account names for tests run without setup stage. * lint fix. * workload inputs are validated skip g204 check. * update dockerfiles * Validate workload type with allowed list of values. * Pick Dockerfiles from upstream master * kubectl get nodes by label * Set scheduled runs on master. 
--------- Signed-off-by: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> Co-authored-by: sivakami Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .pipelines/swiftv2-long-running/README.md | 237 ++++++ .pipelines/swiftv2-long-running/pipeline.yaml | 38 +- .../scripts/create_aks.sh | 138 +++- .../scripts/create_nsg.sh | 299 +++++-- .../swiftv2-long-running/scripts/create_pe.sh | 58 +- .../scripts/create_peerings.sh | 13 +- .../scripts/create_storage.sh | 33 +- .../scripts/create_vnets.sh | 162 ++-- .../scripts/manage_storage_rbac.sh | 83 ++ .../long-running-pipeline-template.yaml | 309 +++++-- hack/aks/Makefile | 15 +- .../swiftv2/long-running-cluster/pod.yaml | 38 + .../long-running-cluster/podnetwork.yaml | 15 + .../podnetworkinstance.yaml | 13 + .../integration/swiftv2/helpers/az_helpers.go | 405 +++++++++ .../swiftv2/longRunningCluster/datapath.go | 766 ++++++++++++++++++ .../datapath_connectivity_test.go | 156 ++++ .../datapath_create_test.go | 115 +++ .../datapath_delete_test.go | 113 +++ .../datapath_private_endpoint_test.go | 141 ++++ .../longRunningCluster/datapath_scale_test.go | 194 +++++ 21 files changed, 3055 insertions(+), 286 deletions(-) create mode 100644 .pipelines/swiftv2-long-running/README.md mode change 100644 => 100755 .pipelines/swiftv2-long-running/scripts/create_nsg.sh create mode 100644 .pipelines/swiftv2-long-running/scripts/manage_storage_rbac.sh create mode 100644 test/integration/manifests/swiftv2/long-running-cluster/pod.yaml create mode 100644 test/integration/manifests/swiftv2/long-running-cluster/podnetwork.yaml create mode 100644 test/integration/manifests/swiftv2/long-running-cluster/podnetworkinstance.yaml create mode 100644 test/integration/swiftv2/helpers/az_helpers.go create mode 100644 test/integration/swiftv2/longRunningCluster/datapath.go create mode 100644 test/integration/swiftv2/longRunningCluster/datapath_connectivity_test.go create mode 100644 
test/integration/swiftv2/longRunningCluster/datapath_create_test.go create mode 100644 test/integration/swiftv2/longRunningCluster/datapath_delete_test.go create mode 100644 test/integration/swiftv2/longRunningCluster/datapath_private_endpoint_test.go create mode 100644 test/integration/swiftv2/longRunningCluster/datapath_scale_test.go diff --git a/.pipelines/swiftv2-long-running/README.md b/.pipelines/swiftv2-long-running/README.md new file mode 100644 index 0000000000..4b01af3bc3 --- /dev/null +++ b/.pipelines/swiftv2-long-running/README.md @@ -0,0 +1,237 @@ +# SwiftV2 Long-Running Pipeline + +This pipeline tests SwiftV2 pod networking in a persistent environment with scheduled test runs. + +## Architecture Overview + +**Infrastructure (Persistent)**: +- **2 AKS Clusters**: aks-1, aks-2 (4 nodes each: 2 low-NIC default pool, 2 high-NIC nplinux pool) +- **4 VNets**: cx_vnet_v1, cx_vnet_v2, cx_vnet_v3 (Customer 1 with PE to storage), cx_vnet_v4 (Customer 2) +- **VNet Peerings**: vnet mesh. +- **Storage Account**: With private endpoint from cx_vnet_v1 +- **NSGs**: Restricting traffic between subnets (s1, s2) in vnet cx_vnet_v1. +- **Node Labels**: All nodes labeled with `workload-type` and `nic-capacity` for targeted test execution + + +**Node Labeling for Multiple Workload Types**: +Each node pool gets labeled with its designated workload type during setup: +```bash +# During cluster creation or node pool addition: +kubectl label nodes -l workload-type=swiftv2-linux +kubectl label nodes -l workload-type=swiftv2-linuxbyon +kubectl label nodes -l workload-type=swiftv2-l1vhaccelnet +kubectl label nodes -l workload-type=swiftv2-l1vhib +``` + +## How It Works + +### Scheduled Test Flow +Every scheduled run, the pipeline: +1. Skips setup stages (infrastructure already exists) +2. **Job 1 - Create Resources**: Creates 8 test scenarios (PodNetwork, PNI, Pods with TCP netcat listeners on port 8080) +3. 
**Job 2 - Connectivity Tests**: Tests TCP connectivity between pods (9 test cases), then waits 20 minutes +4. **Job 3 - Private Endpoint Tests**: Tests private endpoint access and tenant isolation (5 test cases) +5. **Job 4 - Delete Resources**: Deletes all test resources (Phase 1: Pods, Phase 2: PNI/PN/Namespaces) +6. Reports results + + +## Test Case Details + +### 8 Pod Scenarios (Created in Job 1) + +All test scenarios create the following resources: +- **PodNetwork**: Defines the network configuration for a VNet/subnet combination +- **PodNetworkInstance**: Instance-level configuration with IP allocation +- **Pod**: Test pod running nicolaka/netshoot with TCP netcat listener on port 8080 + +| # | Scenario | Cluster | VNet | Subnet | Node Type | Pod Name | Purpose | +|---|----------|---------|------|--------|-----------|----------|---------| +| 1 | Customer2-AKS2-VnetV4-S1-LowNic | aks-2 | cx_vnet_v4 | s1 | low-nic | pod-c2-aks2-v4s1-low | Tenant B pod for isolation testing | +| 2 | Customer2-AKS2-VnetV4-S1-HighNic | aks-2 | cx_vnet_v4 | s1 | high-nic | pod-c2-aks2-v4s1-high | Tenant B pod on high-NIC node | +| 3 | Customer1-AKS1-VnetV1-S1-LowNic | aks-1 | cx_vnet_v1 | s1 | low-nic | pod-c1-aks1-v1s1-low | Tenant A pod in NSG-protected subnet | +| 4 | Customer1-AKS1-VnetV1-S2-LowNic | aks-1 | cx_vnet_v1 | s2 | low-nic | pod-c1-aks1-v1s2-low | Tenant A pod for NSG isolation test | +| 5 | Customer1-AKS1-VnetV1-S2-HighNic | aks-1 | cx_vnet_v1 | s2 | high-nic | pod-c1-aks1-v1s2-high | Tenant A pod on high-NIC node | +| 6 | Customer1-AKS1-VnetV2-S1-HighNic | aks-1 | cx_vnet_v2 | s1 | high-nic | pod-c1-aks1-v2s1-high | Tenant A pod in peered VNet | +| 7 | Customer1-AKS2-VnetV2-S1-LowNic | aks-2 | cx_vnet_v2 | s1 | low-nic | pod-c1-aks2-v2s1-low | Cross-cluster same VNet test | +| 8 | Customer1-AKS2-VnetV3-S1-HighNic | aks-2 | cx_vnet_v3 | s1 | high-nic | pod-c1-aks2-v3s1-high | Private endpoint access test | + +### Connectivity Tests (9 Test Cases in Job 2) + +Tests 
TCP connectivity between pods using netcat with 3-second timeout: + +**Expected to SUCCEED (4 tests)**: + +| Test | Source → Destination | Validation | Purpose | +|------|---------------------|------------|---------| +| SameVNetSameSubnet | pod-c1-aks1-v1s2-low → pod-c1-aks1-v1s2-high | TCP Connected | Basic same-subnet connectivity | +| DifferentVNetSameCustomer | pod-c1-aks1-v2s1-high → pod-c1-aks2-v2s1-low | TCP Connected | Cross-cluster, same VNet (v2) | +| PeeredVNets | pod-c1-aks1-v1s2-low → pod-c1-aks1-v2s1-high | TCP Connected | VNet peering (v1 ↔ v2) | +| PeeredVNets_v2tov3 | pod-c1-aks1-v2s1-high → pod-c1-aks2-v3s1-high | TCP Connected | VNet peering across clusters | + +**Expected to FAIL (5 tests)**: + +| Test | Source → Destination | Expected Error | Purpose | +|------|---------------------|----------------|---------| +| NSGBlocked_S1toS2 | pod-c1-aks1-v1s1-low → pod-c1-aks1-v1s2-high | Connection timeout | NSG blocks s1→s2 in cx_vnet_v1 | +| NSGBlocked_S2toS1 | pod-c1-aks1-v1s2-low → pod-c1-aks1-v1s1-low | Connection timeout | NSG blocks s2→s1 (bidirectional) | +| DifferentCustomers_V1toV4 | pod-c1-aks1-v1s2-low → pod-c2-aks2-v4s1-low | Connection timeout | Customer isolation (no peering) | +| DifferentCustomers_V2toV4 | pod-c1-aks1-v2s1-high → pod-c2-aks2-v4s1-high | Connection timeout | Customer isolation (no peering) | +| UnpeeredVNets_V3toV4 | pod-c1-aks2-v3s1-high → pod-c2-aks2-v4s1-low | Connection timeout | No peering between v3 and v4 | + +**NSG Rules Configuration**: +- cx_vnet_v1 has NSG rules blocking traffic between s1 and s2 subnets: + - Deny outbound from s1 to s2 (priority 100) + - Deny inbound from s1 to s2 (priority 110) + - Deny outbound from s2 to s1 (priority 100) + - Deny inbound from s2 to s1 (priority 110) + +### Private Endpoint Tests (5 Test Cases in Job 3) + +Tests access to Azure Storage Account via Private Endpoint with public network access disabled: + +**Expected to SUCCEED (4 tests)**: + +| Test | Source → Storage | 
Validation | Purpose | +|------|-----------------|------------|---------| +| TenantA_VNetV1_S1_to_StorageA | pod-c1-aks1-v1s1-low → Storage-A | Blob download via SAS | Access via private endpoint from VNet V1 | +| TenantA_VNetV1_S2_to_StorageA | pod-c1-aks1-v1s2-low → Storage-A | Blob download via SAS | Access via private endpoint from VNet V1 | +| TenantA_VNetV2_to_StorageA | pod-c1-aks1-v2s1-high → Storage-A | Blob download via SAS | Access via peered VNet (V2 peered with V1) | +| TenantA_VNetV3_to_StorageA | pod-c1-aks2-v3s1-high → Storage-A | Blob download via SAS | Access via peered VNet from different cluster | + +**Expected to FAIL (1 test)**: + +| Test | Source → Storage | Expected Error | Purpose | +|------|-----------------|----------------|---------| +| TenantB_to_StorageA_Isolation | pod-c2-aks2-v4s1-low → Storage-A | Connection timeout/failed | Tenant isolation - no private endpoint access, public blocked | + +**Private Endpoint Configuration**: +- Private endpoint created in cx_vnet_v1 subnet 'pe' +- Private DNS zone `privatelink.blob.core.windows.net` linked to: + - cx_vnet_v1, cx_vnet_v2, cx_vnet_v3 (Tenant A VNets) + - aks-1 and aks-2 cluster VNets +- Storage Account 1 (Tenant A): + - Public network access: **Disabled** + - Shared key access: Disabled (Azure AD only) + - Blob public access: Disabled +- Storage Account 2 (Tenant B): Public access enabled (for future tests) + +**Test Flow**: +1. DNS resolution: Storage FQDN resolves to private IP for Tenant A, fails/public IP for Tenant B +2. Generate SAS token: Azure AD authentication via management plane +3. Download blob: Using curl with SAS token via data plane +4. 
Validation: Verify blob content matches expected value + +### Resource Creation Patterns + +**Naming Convention**: +``` +BUILD_ID = + +PodNetwork: pn--- +PodNetworkInstance: pni--- +Namespace: pn--- +Pod: pod- +``` + +**Example** (for `resourceGroupName=sv2-long-run-centraluseuap`): +``` +pn-sv2-long-run-centraluseuap-v1-s1 +pni-sv2-long-run-centraluseuap-v1-s1 +pn-sv2-long-run-centraluseuap-v1-s1 (namespace) +pod-c1-aks1-v1s1-low +``` + +**VNet Name Simplification**: +- `cx_vnet_v1` → `v1` +- `cx_vnet_v2` → `v2` +- `cx_vnet_v3` → `v3` +- `cx_vnet_v4` → `v4` + + +## Node Pool Configuration + +### Node Labels and Architecture + +All nodes in the clusters are labeled with two key labels for workload identification and NIC capacity. These labels are applied during cluster creation by the `create_aks.sh` script. + +**1. Workload Type Label** (`workload-type`): +- Purpose: Identifies which test scenario group the node belongs to +- Current value: `swiftv2-linux` (applied to all nodes in current setup) +- Applied during: Cluster creation in Stage 1 (AKSClusterAndNetworking) +- Applied by: `.pipelines/swiftv2-long-running/scripts/create_aks.sh` +- Future use: Supports multiple workload types running as separate stages (e.g., `swiftv2-windows`, `swiftv2-byonodeid`) +- Stage isolation: Each test stage uses `WORKLOAD_TYPE` environment variable to filter nodes + +**2. 
NIC Capacity Label** (`nic-capacity`): +- Purpose: Identifies the NIC capacity tier of the node +- Applied during: Cluster creation in Stage 1 (AKSClusterAndNetworking) +- Applied by: `.pipelines/swiftv2-long-running/scripts/create_aks.sh` +- Values: + - `low-nic`: Default nodepool (nodepool1) with `Standard_D4s_v3` (1 NIC) + - `high-nic`: NPLinux nodepool (nplinux) with `Standard_D16s_v3` (7 NICs) + +**Label Application in create_aks.sh**: +```bash +# Step 1: All nodes get workload-type label +kubectl label nodes --all workload-type=swiftv2-linux --overwrite + +# Step 2: Default nodepool gets low-nic capacity label +kubectl label nodes -l agentpool=nodepool1 nic-capacity=low-nic --overwrite + +# Step 3: NPLinux nodepool gets high-nic capacity label +kubectl label nodes -l agentpool=nplinux nic-capacity=high-nic --overwrite +``` + +### Node Selection in Tests + +Tests use these labels to select appropriate nodes dynamically: +- **Function**: `GetNodesByNicCount()` in `test/integration/swiftv2/longRunningCluster/datapath.go` +- **Filtering**: Nodes filtered by BOTH `workload-type` AND `nic-capacity` labels +- **Environment Variable**: `WORKLOAD_TYPE` (set by each test stage) determines which nodes are used + - Current: `WORKLOAD_TYPE=swiftv2-linux` in ManagedNodeDataPathTests stage + - Future: Different values for each stage (e.g., `swiftv2-byonodeid`, `swiftv2-windows`) +- **Selection Logic**: + ```go + // Get low-nic nodes with matching workload type + kubectl get nodes -l "nic-capacity=low-nic,workload-type=$WORKLOAD_TYPE" + + // Get high-nic nodes with matching workload type + kubectl get nodes -l "nic-capacity=high-nic,workload-type=$WORKLOAD_TYPE" + ``` +- **Pod Assignment**: + - Low-NIC nodes: Limited to 1 pod per node + - High-NIC nodes: Currently limited to 1 pod per node in test logic + +**Node Pool Configuration**: + +| Node Pool | VM SKU | NICs | Label | Pods per Node | +|-----------|--------|------|-------|---------------| +| nodepool1 (default) | 
`Standard_D4s_v3` | 1 | `nic-capacity=low-nic` | 1 | +| nplinux | `Standard_D16s_v3` | 7 | `nic-capacity=high-nic` | 1 (current test logic) | + +**Note**: VM SKUs are hardcoded as constants in the pipeline template and cannot be changed by users. + +## File Structure + +``` +.pipelines/swiftv2-long-running/ +├── pipeline.yaml # Main pipeline with schedule +├── README.md # This file +├── template/ +│ └── long-running-pipeline-template.yaml # Stage definitions (2 jobs) +└── scripts/ + ├── create_aks.sh # AKS cluster creation + ├── create_vnets.sh # VNet and subnet creation + ├── create_peerings.sh # VNet peering setup + ├── create_storage.sh # Storage account creation + ├── create_nsg.sh # Network security groups + └── create_pe.sh # Private endpoint setup + +test/integration/swiftv2/longRunningCluster/ +├── datapath_test.go # Original combined test (deprecated) +├── datapath_create_test.go # Create test scenarios (Job 1) +├── datapath_delete_test.go # Delete test scenarios (Job 2) +├── datapath.go # Resource orchestration +└── helpers/ + └── az_helpers.go # Azure/kubectl helper functions +``` diff --git a/.pipelines/swiftv2-long-running/pipeline.yaml b/.pipelines/swiftv2-long-running/pipeline.yaml index b6d085901d..042af7634b 100644 --- a/.pipelines/swiftv2-long-running/pipeline.yaml +++ b/.pipelines/swiftv2-long-running/pipeline.yaml @@ -1,4 +1,13 @@ trigger: none +pr: none + +schedules: + - cron: "0 */3 * * *" # Every 3 hours at minute 0 + displayName: "Run tests every 3 hours" + branches: + include: + - master + always: true # Run even if there are no code changes parameters: - name: subscriptionId @@ -11,25 +20,16 @@ parameters: type: string default: "centraluseuap" - - name: resourceGroupName - displayName: "Resource Group Name" - type: string - default: "long-run-$(Build.BuildId)" - - - name: vmSkuDefault - displayName: "VM SKU for Default Node Pool" - type: string - default: "Standard_D2s_v3" - - - name: vmSkuHighNIC - displayName: "VM SKU for High NIC Node 
Pool" - type: string - default: "Standard_D16s_v3" + - name: runSetupStages + displayName: "Create New Infrastructure Setup" + type: boolean + default: false - - name: serviceConnection - displayName: "Azure Service Connection" + # Setup-only parameters (only used when runSetupStages=true) + - name: resourceGroupName + displayName: "Resource Group Name used when Create new Infrastructure Setup is selected" type: string - default: "Azure Container Networking - Standalone Test Service Connection" + default: "sv2-long-run-$(Build.BuildId)" extends: template: template/long-running-pipeline-template.yaml @@ -37,6 +37,4 @@ extends: subscriptionId: ${{ parameters.subscriptionId }} location: ${{ parameters.location }} resourceGroupName: ${{ parameters.resourceGroupName }} - vmSkuDefault: ${{ parameters.vmSkuDefault }} - vmSkuHighNIC: ${{ parameters.vmSkuHighNIC }} - serviceConnection: ${{ parameters.serviceConnection }} + runSetupStages: ${{ parameters.runSetupStages }} diff --git a/.pipelines/swiftv2-long-running/scripts/create_aks.sh b/.pipelines/swiftv2-long-running/scripts/create_aks.sh index 4ab38c0f42..8cc2802de2 100644 --- a/.pipelines/swiftv2-long-running/scripts/create_aks.sh +++ b/.pipelines/swiftv2-long-running/scripts/create_aks.sh @@ -6,58 +6,106 @@ LOCATION=$2 RG=$3 VM_SKU_DEFAULT=$4 VM_SKU_HIGHNIC=$5 +DELEGATOR_APP_NAME=$6 +DELEGATOR_RG=$7 +DELEGATOR_SUB=$8 +DELEGATOR_BASE_URL=${9:-"http://localhost:8080"} -CLUSTER_COUNT=2 -CLUSTER_PREFIX="aks" -DEFAULT_NODE_COUNT=1 -COMMON_TAGS="fastpathenabled=true RGOwner=LongRunningTestPipelines stampcreatorserviceinfo=true" +CLUSTER_COUNT=2 +CLUSTER_PREFIX="aks" -wait_for_provisioning() { # Helper for safe retry/wait for provisioning states (basic) - local rg="$1" clusterName="$2" - echo "Waiting for AKS '$clusterName' in RG '$rg' to reach Succeeded/Failed (polling)..." 
- while :; do + +stamp_vnet() { + local vnet_id="$1" + + responseFile="response.txt" + modified_vnet="${vnet_id//\//%2F}" + cmd_stamp_curl="'curl -v -X PUT ${DELEGATOR_BASE_URL}/VirtualNetwork/$modified_vnet/stampcreatorservicename'" + cmd_containerapp_exec="az containerapp exec -n $DELEGATOR_APP_NAME -g $DELEGATOR_RG --subscription $DELEGATOR_SUB --command $cmd_stamp_curl" + + max_retries=10 + sleep_seconds=15 + retry_count=0 + + while [[ $retry_count -lt $max_retries ]]; do + script --quiet -c "$cmd_containerapp_exec" "$responseFile" + if grep -qF "200 OK" "$responseFile"; then + echo "Subnet Delegator successfully stamped the vnet" + return 0 + else + echo "Subnet Delegator failed to stamp the vnet, attempt $((retry_count+1))" + cat "$responseFile" + retry_count=$((retry_count+1)) + sleep "$sleep_seconds" + fi + done + + echo "Failed to stamp the vnet even after $max_retries attempts" + exit 1 +} + +wait_for_provisioning() { + local rg="$1" clusterName="$2" + echo "Waiting for AKS '$clusterName' in RG '$rg'..." 
+ local max_attempts=40 + local attempt=0 + + while [[ $attempt -lt $max_attempts ]]; do state=$(az aks show --resource-group "$rg" --name "$clusterName" --query provisioningState -o tsv 2>/dev/null || true) - if [ -z "$state" ]; then - sleep 3 - continue + echo "Attempt $((attempt+1))/$max_attempts - Provisioning state: $state" + + if [[ "$state" =~ Succeeded ]]; then + echo "Provisioning succeeded" + return 0 + fi + if [[ "$state" =~ Failed|Canceled ]]; then + echo "Provisioning finished with state: $state" + return 1 fi - case "$state" in - Succeeded|Succeeded*) echo "Provisioning state: $state"; break ;; - Failed|Canceled|Rejected) echo "Provisioning finished with state: $state"; break ;; - *) printf "."; sleep 6 ;; - esac + + attempt=$((attempt+1)) + sleep 15 done + + echo "Timeout waiting for AKS cluster provisioning after $((max_attempts * 15)) seconds" + return 1 } - for i in $(seq 1 "$CLUSTER_COUNT"); do - echo "==============================" - echo " Working on cluster set #$i" - echo "==============================" - - CLUSTER_NAME="${CLUSTER_PREFIX}-${i}" - echo "Creating AKS cluster '$CLUSTER_NAME' in RG '$RG'" - - make -C ./hack/aks azcfg AZCLI=az REGION=$LOCATION - - make -C ./hack/aks swiftv2-podsubnet-cluster-up \ - AZCLI=az REGION=$LOCATION \ - SUB=$SUBSCRIPTION_ID \ - GROUP=$RG \ - CLUSTER=$CLUSTER_NAME \ - NODE_COUNT=$DEFAULT_NODE_COUNT \ - VM_SIZE=$VM_SKU_DEFAULT \ - - echo " - waiting for AKS provisioning state..." - wait_for_provisioning "$RG" "$CLUSTER_NAME" - - echo "Adding multi-tenant nodepool ' to '$CLUSTER_NAME'" - make -C ./hack/aks linux-swiftv2-nodepool-up \ - AZCLI=az REGION=$LOCATION \ - GROUP=$RG \ - VM_SIZE=$VM_SKU_HIGHNIC \ - CLUSTER=$CLUSTER_NAME \ - SUB=$SUBSCRIPTION_ID \ + echo "Creating cluster #$i..." 
+ + CLUSTER_NAME="${CLUSTER_PREFIX}-${i}" + make -C ./hack/aks azcfg AZCLI=az REGION=$LOCATION + make -C ./hack/aks swiftv2-podsubnet-cluster-up \ + AZCLI=az REGION=$LOCATION \ + SUB=$SUBSCRIPTION_ID \ + GROUP=$RG \ + CLUSTER=$CLUSTER_NAME \ + VM_SIZE=$VM_SKU_DEFAULT + wait_for_provisioning "$RG" "$CLUSTER_NAME" + + vnet_id=$(az network vnet show -g "$RG" --name "$CLUSTER_NAME" --query id -o tsv) + stamp_vnet "$vnet_id" + + make -C ./hack/aks linux-swiftv2-nodepool-up \ + AZCLI=az REGION=$LOCATION \ + GROUP=$RG \ + VM_SIZE=$VM_SKU_HIGHNIC \ + CLUSTER=$CLUSTER_NAME \ + SUB=$SUBSCRIPTION_ID + + az aks get-credentials -g "$RG" -n "$CLUSTER_NAME" --admin --overwrite-existing \ + --file "/tmp/${CLUSTER_NAME}.kubeconfig" + + echo "Labeling all nodes in $CLUSTER_NAME with workload-type=swiftv2-linux" + kubectl --kubeconfig "/tmp/${CLUSTER_NAME}.kubeconfig" label nodes --all workload-type=swiftv2-linux --overwrite + + echo "Labeling default nodepool (nodepool1) nodes with nic-capacity=low-nic" + kubectl --kubeconfig "/tmp/${CLUSTER_NAME}.kubeconfig" label nodes -l agentpool=nodepool1 nic-capacity=low-nic --overwrite + + echo "Labeling nplinux nodepool nodes with nic-capacity=high-nic" + kubectl --kubeconfig "/tmp/${CLUSTER_NAME}.kubeconfig" label nodes -l agentpool=nplinux nic-capacity=high-nic --overwrite done -echo "All done. Created $CLUSTER_COUNT cluster set(s)." + +echo "All clusters complete." diff --git a/.pipelines/swiftv2-long-running/scripts/create_nsg.sh b/.pipelines/swiftv2-long-running/scripts/create_nsg.sh old mode 100644 new mode 100755 index cec91cd7cf..0a18c0a47f --- a/.pipelines/swiftv2-long-running/scripts/create_nsg.sh +++ b/.pipelines/swiftv2-long-running/scripts/create_nsg.sh @@ -5,15 +5,49 @@ trap 'echo "[ERROR] Failed during NSG creation or rule setup." 
>&2' ERR SUBSCRIPTION_ID=$1 RG=$2 LOCATION=$3 +VNET_A1="cx_vnet_v1" +SUBNET1_PREFIX=$(az network vnet subnet show -g "$RG" --vnet-name "$VNET_A1" -n s1 --query "addressPrefix" -o tsv) +SUBNET2_PREFIX=$(az network vnet subnet show -g "$RG" --vnet-name "$VNET_A1" -n s2 --query "addressPrefix" -o tsv) -VNET_A1="cx_vnet_a1" -SUBNET1_PREFIX="10.10.1.0/24" -SUBNET2_PREFIX="10.10.2.0/24" -NSG_NAME="${VNET_A1}-nsg" +echo "Subnet s1 CIDR: $SUBNET1_PREFIX" +echo "Subnet s2 CIDR: $SUBNET2_PREFIX" + +if [[ -z "$SUBNET1_PREFIX" || -z "$SUBNET2_PREFIX" ]]; then + echo "[ERROR] Failed to retrieve subnet address prefixes!" >&2 + exit 1 +fi + +echo "Retrieving NSGs associated with subnets..." +max_retries=10 +retry_count=0 +retry_delay=30 + +while [[ $retry_count -lt $max_retries ]]; do + NSG_S1_ID=$(az network vnet subnet show -g "$RG" --vnet-name "$VNET_A1" -n s1 --query "networkSecurityGroup.id" -o tsv 2>/dev/null || echo "") + NSG_S2_ID=$(az network vnet subnet show -g "$RG" --vnet-name "$VNET_A1" -n s2 --query "networkSecurityGroup.id" -o tsv 2>/dev/null || echo "") + + if [[ -n "$NSG_S1_ID" && -n "$NSG_S2_ID" ]]; then + echo "[OK] Successfully retrieved NSG associations for both subnets" + break + fi + + retry_count=$((retry_count + 1)) + if [[ $retry_count -lt $max_retries ]]; then + echo "[RETRY $retry_count/$max_retries] NSG associations not ready yet. Waiting ${retry_delay}s before retry..." + sleep $retry_delay + else + echo "[ERROR] Failed to retrieve NSG associations after $max_retries attempts!" >&2 + exit 1 + fi +done + +NSG_S1_NAME=$(basename "$NSG_S1_ID") +NSG_S2_NAME=$(basename "$NSG_S2_ID") +echo "Subnet s1 NSG: $NSG_S1_NAME" +echo "Subnet s2 NSG: $NSG_S2_NAME" verify_nsg() { local rg="$1"; local name="$2" - echo "==> Verifying NSG: $name" if az network nsg show -g "$rg" -n "$name" &>/dev/null; then echo "[OK] Verified NSG $name exists." 
else @@ -24,7 +58,6 @@ verify_nsg() { verify_nsg_rule() { local rg="$1"; local nsg="$2"; local rule="$3" - echo "==> Verifying NSG rule: $rule in $nsg" if az network nsg rule show -g "$rg" --nsg-name "$nsg" -n "$rule" &>/dev/null; then echo "[OK] Verified NSG rule $rule exists in $nsg." else @@ -33,77 +66,193 @@ verify_nsg_rule() { fi } -verify_subnet_nsg_association() { - local rg="$1"; local vnet="$2"; local subnet="$3"; local nsg="$4" - echo "==> Verifying NSG association on subnet $subnet..." - local associated_nsg - associated_nsg=$(az network vnet subnet show -g "$rg" --vnet-name "$vnet" -n "$subnet" --query "networkSecurityGroup.id" -o tsv 2>/dev/null || echo "") - if [[ "$associated_nsg" == *"$nsg"* ]]; then - echo "[OK] Verified subnet $subnet is associated with NSG $nsg." - else - echo "[ERROR] Subnet $subnet is NOT associated with NSG $nsg!" >&2 - exit 1 - fi +wait_for_nsg() { + local rg="$1"; local name="$2" + echo "Waiting for NSG $name to become available..." + local max_attempts=30 + local attempt=0 + while [[ $attempt -lt $max_attempts ]]; do + if az network nsg show -g "$rg" -n "$name" &>/dev/null; then + local provisioning_state + provisioning_state=$(az network nsg show -g "$rg" -n "$name" --query "provisioningState" -o tsv) + if [[ "$provisioning_state" == "Succeeded" ]]; then + echo "[OK] NSG $name is available (provisioningState: $provisioning_state)." + return 0 + fi + echo "Waiting... NSG $name provisioningState: $provisioning_state" + fi + attempt=$((attempt + 1)) + sleep 10 + done + echo "[ERROR] NSG $name did not become available within the expected time!" >&2 + exit 1 } -# ------------------------------- -# 1. Create NSG -# ------------------------------- -echo "==> Creating Network Security Group: $NSG_NAME" -az network nsg create -g "$RG" -n "$NSG_NAME" -l "$LOCATION" --output none \ - && echo "[OK] NSG '$NSG_NAME' created." -verify_nsg "$RG" "$NSG_NAME" +wait_for_nsg "$RG" "$NSG_S1_NAME" -# ------------------------------- -# 2. 
Create NSG Rules -# ------------------------------- -echo "==> Creating NSG rule to DENY traffic from Subnet1 ($SUBNET1_PREFIX) to Subnet2 ($SUBNET2_PREFIX)" -az network nsg rule create \ - --resource-group "$RG" \ - --nsg-name "$NSG_NAME" \ - --name deny-subnet1-to-subnet2 \ - --priority 100 \ - --source-address-prefixes "$SUBNET1_PREFIX" \ - --destination-address-prefixes "$SUBNET2_PREFIX" \ - --direction Inbound \ - --access Deny \ - --protocol "*" \ - --description "Deny all traffic from Subnet1 to Subnet2" \ - --output none \ - && echo "[OK] Deny rule from Subnet1 → Subnet2 created." - -verify_nsg_rule "$RG" "$NSG_NAME" "deny-subnet1-to-subnet2" - -echo "==> Creating NSG rule to DENY traffic from Subnet2 ($SUBNET2_PREFIX) to Subnet1 ($SUBNET1_PREFIX)" -az network nsg rule create \ - --resource-group "$RG" \ - --nsg-name "$NSG_NAME" \ - --name deny-subnet2-to-subnet1 \ - --priority 200 \ - --source-address-prefixes "$SUBNET2_PREFIX" \ - --destination-address-prefixes "$SUBNET1_PREFIX" \ - --direction Inbound \ - --access Deny \ - --protocol "*" \ - --description "Deny all traffic from Subnet2 to Subnet1" \ - --output none \ - && echo "[OK] Deny rule from Subnet2 → Subnet1 created." - -verify_nsg_rule "$RG" "$NSG_NAME" "deny-subnet2-to-subnet1" - -# ------------------------------- -# 3. 
Associate NSG with Subnets -# ------------------------------- -for SUBNET in s1 s2; do - echo "==> Associating NSG $NSG_NAME with subnet $SUBNET" - az network vnet subnet update \ - --name "$SUBNET" \ - --vnet-name "$VNET_A1" \ +if [[ "$NSG_S1_NAME" == "$NSG_S2_NAME" ]]; then + echo "Both subnets share the same NSG: $NSG_S1_NAME" + echo "Creating all NSG rules on shared NSG with unique priorities" + + echo "Creating NSG rule on $NSG_S1_NAME to DENY OUTBOUND traffic from Subnet1 ($SUBNET1_PREFIX) to Subnet2 ($SUBNET2_PREFIX)" + az network nsg rule create \ --resource-group "$RG" \ - --network-security-group "$NSG_NAME" \ - --output none - verify_subnet_nsg_association "$RG" "$VNET_A1" "$SUBNET" "$NSG_NAME" -done - -echo "NSG '$NSG_NAME' created successfully with bidirectional isolation between Subnet1 and Subnet2." + --nsg-name "$NSG_S1_NAME" \ + --name deny-s1-to-s2-outbound \ + --priority 100 \ + --source-address-prefixes "$SUBNET1_PREFIX" \ + --destination-address-prefixes "$SUBNET2_PREFIX" \ + --source-port-ranges "*" \ + --destination-port-ranges "*" \ + --direction Outbound \ + --access Deny \ + --protocol "*" \ + --description "Deny outbound traffic from Subnet1 to Subnet2" \ + --output none \ + && echo "[OK] Deny outbound rule from Subnet1 → Subnet2 created on $NSG_S1_NAME." 
+ + verify_nsg_rule "$RG" "$NSG_S1_NAME" "deny-s1-to-s2-outbound" + + echo "Creating NSG rule on $NSG_S1_NAME to DENY INBOUND traffic from Subnet2 ($SUBNET2_PREFIX) to Subnet1 ($SUBNET1_PREFIX)" + az network nsg rule create \ + --resource-group "$RG" \ + --nsg-name "$NSG_S1_NAME" \ + --name deny-s2-to-s1-inbound \ + --priority 100 \ + --source-address-prefixes "$SUBNET2_PREFIX" \ + --destination-address-prefixes "$SUBNET1_PREFIX" \ + --source-port-ranges "*" \ + --destination-port-ranges "*" \ + --direction Inbound \ + --access Deny \ + --protocol "*" \ + --description "Deny inbound traffic from Subnet2 to Subnet1" \ + --output none \ + && echo "[OK] Deny inbound rule from Subnet2 → Subnet1 created on $NSG_S1_NAME." + + verify_nsg_rule "$RG" "$NSG_S1_NAME" "deny-s2-to-s1-inbound" + + echo "Creating NSG rule on $NSG_S1_NAME to DENY OUTBOUND traffic from Subnet2 ($SUBNET2_PREFIX) to Subnet1 ($SUBNET1_PREFIX)" + az network nsg rule create \ + --resource-group "$RG" \ + --nsg-name "$NSG_S1_NAME" \ + --name deny-s2-to-s1-outbound \ + --priority 110 \ + --source-address-prefixes "$SUBNET2_PREFIX" \ + --destination-address-prefixes "$SUBNET1_PREFIX" \ + --source-port-ranges "*" \ + --destination-port-ranges "*" \ + --direction Outbound \ + --access Deny \ + --protocol "*" \ + --description "Deny outbound traffic from Subnet2 to Subnet1" \ + --output none \ + && echo "[OK] Deny outbound rule from Subnet2 → Subnet1 created on $NSG_S1_NAME." 
+ + verify_nsg_rule "$RG" "$NSG_S1_NAME" "deny-s2-to-s1-outbound" + + echo "Creating NSG rule on $NSG_S1_NAME to DENY INBOUND traffic from Subnet1 ($SUBNET1_PREFIX) to Subnet2 ($SUBNET2_PREFIX)" + az network nsg rule create \ + --resource-group "$RG" \ + --nsg-name "$NSG_S1_NAME" \ + --name deny-s1-to-s2-inbound \ + --priority 110 \ + --source-address-prefixes "$SUBNET1_PREFIX" \ + --destination-address-prefixes "$SUBNET2_PREFIX" \ + --source-port-ranges "*" \ + --destination-port-ranges "*" \ + --direction Inbound \ + --access Deny \ + --protocol "*" \ + --description "Deny inbound traffic from Subnet1 to Subnet2" \ + --output none \ + && echo "[OK] Deny inbound rule from Subnet1 → Subnet2 created on $NSG_S1_NAME." + + verify_nsg_rule "$RG" "$NSG_S1_NAME" "deny-s1-to-s2-inbound" + echo "NSG rules applied successfully on shared NSG $NSG_S1_NAME with bidirectional isolation between Subnet1 and Subnet2." +else + echo "Subnets have different NSGs" + echo "Subnet s1 NSG: $NSG_S1_NAME" + echo "Subnet s2 NSG: $NSG_S2_NAME" + + wait_for_nsg "$RG" "$NSG_S2_NAME" + + echo "Creating NSG rule on $NSG_S1_NAME to DENY OUTBOUND traffic from Subnet1 ($SUBNET1_PREFIX) to Subnet2 ($SUBNET2_PREFIX)" + az network nsg rule create \ + --resource-group "$RG" \ + --nsg-name "$NSG_S1_NAME" \ + --name deny-s1-to-s2-outbound \ + --priority 100 \ + --source-address-prefixes "$SUBNET1_PREFIX" \ + --destination-address-prefixes "$SUBNET2_PREFIX" \ + --source-port-ranges "*" \ + --destination-port-ranges "*" \ + --direction Outbound \ + --access Deny \ + --protocol "*" \ + --description "Deny outbound traffic from Subnet1 to Subnet2" \ + --output none \ + && echo "[OK] Deny outbound rule from Subnet1 → Subnet2 created on $NSG_S1_NAME." 
+ + verify_nsg_rule "$RG" "$NSG_S1_NAME" "deny-s1-to-s2-outbound" + + echo "Creating NSG rule on $NSG_S1_NAME to DENY INBOUND traffic from Subnet2 ($SUBNET2_PREFIX) to Subnet1 ($SUBNET1_PREFIX)" + az network nsg rule create \ + --resource-group "$RG" \ + --nsg-name "$NSG_S1_NAME" \ + --name deny-s2-to-s1-inbound \ + --priority 110 \ + --source-address-prefixes "$SUBNET2_PREFIX" \ + --destination-address-prefixes "$SUBNET1_PREFIX" \ + --source-port-ranges "*" \ + --destination-port-ranges "*" \ + --direction Inbound \ + --access Deny \ + --protocol "*" \ + --description "Deny inbound traffic from Subnet2 to Subnet1" \ + --output none \ + && echo "[OK] Deny inbound rule from Subnet2 → Subnet1 created on $NSG_S1_NAME." + + verify_nsg_rule "$RG" "$NSG_S1_NAME" "deny-s2-to-s1-inbound" + + echo "Creating NSG rule on $NSG_S2_NAME to DENY OUTBOUND traffic from Subnet2 ($SUBNET2_PREFIX) to Subnet1 ($SUBNET1_PREFIX)" + az network nsg rule create \ + --resource-group "$RG" \ + --nsg-name "$NSG_S2_NAME" \ + --name deny-s2-to-s1-outbound \ + --priority 100 \ + --source-address-prefixes "$SUBNET2_PREFIX" \ + --destination-address-prefixes "$SUBNET1_PREFIX" \ + --source-port-ranges "*" \ + --destination-port-ranges "*" \ + --direction Outbound \ + --access Deny \ + --protocol "*" \ + --description "Deny outbound traffic from Subnet2 to Subnet1" \ + --output none \ + && echo "[OK] Deny outbound rule from Subnet2 → Subnet1 created on $NSG_S2_NAME." 
+ + verify_nsg_rule "$RG" "$NSG_S2_NAME" "deny-s2-to-s1-outbound" + + echo "Creating NSG rule on $NSG_S2_NAME to DENY INBOUND traffic from Subnet1 ($SUBNET1_PREFIX) to Subnet2 ($SUBNET2_PREFIX)" + az network nsg rule create \ + --resource-group "$RG" \ + --nsg-name "$NSG_S2_NAME" \ + --name deny-s1-to-s2-inbound \ + --priority 110 \ + --source-address-prefixes "$SUBNET1_PREFIX" \ + --destination-address-prefixes "$SUBNET2_PREFIX" \ + --source-port-ranges "*" \ + --destination-port-ranges "*" \ + --direction Inbound \ + --access Deny \ + --protocol "*" \ + --description "Deny inbound traffic from Subnet1 to Subnet2" \ + --output none \ + && echo "[OK] Deny inbound rule from Subnet1 → Subnet2 created on $NSG_S2_NAME." + + verify_nsg_rule "$RG" "$NSG_S2_NAME" "deny-s1-to-s2-inbound" + + echo "NSG rules applied successfully on $NSG_S1_NAME and $NSG_S2_NAME with bidirectional isolation between Subnet1 and Subnet2." +fi diff --git a/.pipelines/swiftv2-long-running/scripts/create_pe.sh b/.pipelines/swiftv2-long-running/scripts/create_pe.sh index c9f7e782e0..6879f3cbb0 100644 --- a/.pipelines/swiftv2-long-running/scripts/create_pe.sh +++ b/.pipelines/swiftv2-long-running/scripts/create_pe.sh @@ -5,21 +5,17 @@ trap 'echo "[ERROR] Failed during Private Endpoint or DNS setup." >&2' ERR SUBSCRIPTION_ID=$1 LOCATION=$2 RG=$3 -SA1_NAME=$4 # Storage account 1 +SA1_NAME=$4 -VNET_A1="cx_vnet_a1" -VNET_A2="cx_vnet_a2" -VNET_A3="cx_vnet_a3" +VNET_A1="cx_vnet_v1" +VNET_A2="cx_vnet_v2" +VNET_A3="cx_vnet_v3" SUBNET_PE_A1="pe" PE_NAME="${SA1_NAME}-pe" PRIVATE_DNS_ZONE="privatelink.blob.core.windows.net" -# ------------------------------- -# Function: Verify Resource Exists -# ------------------------------- verify_dns_zone() { local rg="$1"; local zone="$2" - echo "==> Verifying Private DNS zone: $zone" if az network private-dns zone show -g "$rg" -n "$zone" &>/dev/null; then echo "[OK] Verified DNS zone $zone exists." 
else @@ -30,7 +26,6 @@ verify_dns_zone() { verify_dns_link() { local rg="$1"; local zone="$2"; local link="$3" - echo "==> Verifying DNS link: $link for zone $zone" if az network private-dns link vnet show -g "$rg" --zone-name "$zone" -n "$link" &>/dev/null; then echo "[OK] Verified DNS link $link exists." else @@ -41,7 +36,6 @@ verify_dns_link() { verify_private_endpoint() { local rg="$1"; local name="$2" - echo "==> Verifying Private Endpoint: $name" if az network private-endpoint show -g "$rg" -n "$name" &>/dev/null; then echo "[OK] Verified Private Endpoint $name exists." else @@ -50,17 +44,14 @@ verify_private_endpoint() { fi } -# 1. Create Private DNS zone -echo "==> Creating Private DNS zone: $PRIVATE_DNS_ZONE" +echo "Creating Private DNS zone: $PRIVATE_DNS_ZONE" az network private-dns zone create -g "$RG" -n "$PRIVATE_DNS_ZONE" --output none \ && echo "[OK] DNS zone $PRIVATE_DNS_ZONE created." - verify_dns_zone "$RG" "$PRIVATE_DNS_ZONE" -# 2. Link DNS zone to VNet for VNET in "$VNET_A1" "$VNET_A2" "$VNET_A3"; do LINK_NAME="${VNET}-link" - echo "==> Linking DNS zone $PRIVATE_DNS_ZONE to VNet $VNET" + echo "Linking DNS zone $PRIVATE_DNS_ZONE to VNet $VNET" az network private-dns link vnet create \ -g "$RG" -n "$LINK_NAME" \ --zone-name "$PRIVATE_DNS_ZONE" \ @@ -71,9 +62,32 @@ for VNET in "$VNET_A1" "$VNET_A2" "$VNET_A3"; do verify_dns_link "$RG" "$PRIVATE_DNS_ZONE" "$LINK_NAME" done -# 3. 
Create Private Endpoint -echo "==> Creating Private Endpoint for Storage Account: $SA1_NAME" +echo "Linking DNS zone to AKS cluster VNets" +for CLUSTER in "aks-1" "aks-2"; do + echo "Getting VNet for $CLUSTER" + AKS_VNET_ID=$(az aks show -g "$RG" -n "$CLUSTER" --query "agentPoolProfiles[0].vnetSubnetId" -o tsv | cut -d'/' -f1-9) + + if [ -z "$AKS_VNET_ID" ]; then + echo "[WARNING] Could not get VNet for $CLUSTER, skipping DNS link" + continue + fi + + LINK_NAME="${CLUSTER}-vnet-link" + echo "Linking DNS zone to $CLUSTER VNet" + az network private-dns link vnet create \ + -g "$RG" -n "$LINK_NAME" \ + --zone-name "$PRIVATE_DNS_ZONE" \ + --virtual-network "$AKS_VNET_ID" \ + --registration-enabled false \ + --output none \ + && echo "[OK] Linked DNS zone to $CLUSTER VNet." + verify_dns_link "$RG" "$PRIVATE_DNS_ZONE" "$LINK_NAME" +done + +echo "Creating Private Endpoint for Storage Account: $SA1_NAME" SA1_ID=$(az storage account show -g "$RG" -n "$SA1_NAME" --query id -o tsv) +DNS_ZONE_ID=$(az network private-dns zone show -g "$RG" -n "$PRIVATE_DNS_ZONE" --query id -o tsv) + az network private-endpoint create \ -g "$RG" -n "$PE_NAME" -l "$LOCATION" \ --vnet-name "$VNET_A1" --subnet "$SUBNET_PE_A1" \ @@ -84,4 +98,14 @@ az network private-endpoint create \ && echo "[OK] Private Endpoint $PE_NAME created for $SA1_NAME." verify_private_endpoint "$RG" "$PE_NAME" +echo "Creating Private DNS Zone Group to register DNS record" +az network private-endpoint dns-zone-group create \ + -g "$RG" \ + --endpoint-name "$PE_NAME" \ + --name "default" \ + --private-dns-zone "$DNS_ZONE_ID" \ + --zone-name "blob" \ + --output none \ + && echo "[OK] DNS Zone Group created, DNS record will be auto-registered." + echo "All Private DNS and Endpoint resources created and verified successfully." 
diff --git a/.pipelines/swiftv2-long-running/scripts/create_peerings.sh b/.pipelines/swiftv2-long-running/scripts/create_peerings.sh index d6655492f1..d3aa8ae561 100644 --- a/.pipelines/swiftv2-long-running/scripts/create_peerings.sh +++ b/.pipelines/swiftv2-long-running/scripts/create_peerings.sh @@ -3,14 +3,13 @@ set -e trap 'echo "[ERROR] Failed during VNet peering creation." >&2' ERR RG=$1 -VNET_A1="cx_vnet_a1" -VNET_A2="cx_vnet_a2" -VNET_A3="cx_vnet_a3" -VNET_B1="cx_vnet_b1" +VNET_A1="cx_vnet_v1" +VNET_A2="cx_vnet_v2" +VNET_A3="cx_vnet_v3" +VNET_B1="cx_vnet_v4" verify_peering() { local rg="$1"; local vnet="$2"; local peering="$3" - echo "==> Verifying peering $peering on $vnet..." if az network vnet peering show -g "$rg" --vnet-name "$vnet" -n "$peering" --query "peeringState" -o tsv | grep -q "Connected"; then echo "[OK] Peering $peering on $vnet is Connected." else @@ -21,13 +20,11 @@ verify_peering() { peer_two_vnets() { local rg="$1"; local v1="$2"; local v2="$3"; local name12="$4"; local name21="$5" - echo "==> Peering $v1 <-> $v2" az network vnet peering create -g "$rg" -n "$name12" --vnet-name "$v1" --remote-vnet "$v2" --allow-vnet-access --output none \ && echo "Created peering $name12" az network vnet peering create -g "$rg" -n "$name21" --vnet-name "$v2" --remote-vnet "$v1" --allow-vnet-access --output none \ && echo "Created peering $name21" - # Verify both peerings are active verify_peering "$rg" "$v1" "$name12" verify_peering "$rg" "$v2" "$name21" } @@ -35,4 +32,4 @@ peer_two_vnets() { peer_two_vnets "$RG" "$VNET_A1" "$VNET_A2" "A1-to-A2" "A2-to-A1" peer_two_vnets "$RG" "$VNET_A2" "$VNET_A3" "A2-to-A3" "A3-to-A2" peer_two_vnets "$RG" "$VNET_A1" "$VNET_A3" "A1-to-A3" "A3-to-A1" -echo "All VNet peerings created and verified successfully." +echo "VNet peerings created and verified." 
diff --git a/.pipelines/swiftv2-long-running/scripts/create_storage.sh b/.pipelines/swiftv2-long-running/scripts/create_storage.sh index caefc69294..36286c96e8 100644 --- a/.pipelines/swiftv2-long-running/scripts/create_storage.sh +++ b/.pipelines/swiftv2-long-running/scripts/create_storage.sh @@ -10,12 +10,9 @@ RAND=$(openssl rand -hex 4) SA1="sa1${RAND}" SA2="sa2${RAND}" -# Set subscription context az account set --subscription "$SUBSCRIPTION_ID" - -# Create storage accounts for SA in "$SA1" "$SA2"; do - echo "==> Creating storage account $SA" + echo "Creating storage account $SA" az storage account create \ --name "$SA" \ --resource-group "$RG" \ @@ -28,8 +25,7 @@ for SA in "$SA1" "$SA2"; do --min-tls-version TLS1_2 \ --query "name" -o tsv \ && echo "Storage account $SA created successfully." - # Verify creation success - echo "==> Verifying storage account $SA exists..." + if az storage account show --name "$SA" --resource-group "$RG" &>/dev/null; then echo "[OK] Storage account $SA verified successfully." else @@ -38,9 +34,32 @@ for SA in "$SA1" "$SA2"; do fi done +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +bash "$SCRIPT_DIR/manage_storage_rbac.sh" assign "$SUBSCRIPTION_ID" "$RG" "$SA1 $SA2" + +for SA in "$SA1" "$SA2"; do + echo "Creating test container in $SA" + az storage container create \ + --name "test" \ + --account-name "$SA" \ + --auth-mode login \ + && echo "[OK] Container 'test' created in $SA" + + echo "Uploading test blob to $SA" + az storage blob upload \ + --account-name "$SA" \ + --container-name "test" \ + --name "hello.txt" \ + --data "Hello from Private Endpoint - Storage: $SA" \ + --auth-mode login \ + --overwrite \ + && echo "[OK] Test blob 'hello.txt' uploaded to $SA/test/" +done + +echo "Removing RBAC role after blob upload" +bash "$SCRIPT_DIR/manage_storage_rbac.sh" delete "$SUBSCRIPTION_ID" "$RG" "$SA1 $SA2" echo "All storage accounts created and verified successfully." 
-# Set pipeline output variables set +x echo "##vso[task.setvariable variable=StorageAccount1;isOutput=true]$SA1" echo "##vso[task.setvariable variable=StorageAccount2;isOutput=true]$SA2" diff --git a/.pipelines/swiftv2-long-running/scripts/create_vnets.sh b/.pipelines/swiftv2-long-running/scripts/create_vnets.sh index eb894d06ff..9cb141b8de 100644 --- a/.pipelines/swiftv2-long-running/scripts/create_vnets.sh +++ b/.pipelines/swiftv2-long-running/scripts/create_vnets.sh @@ -2,35 +2,29 @@ set -e trap 'echo "[ERROR] Failed while creating VNets or subnets. Check Azure CLI logs above." >&2' ERR -SUBSCRIPTION_ID=$1 +SUB_ID=$1 LOCATION=$2 RG=$3 +BUILD_ID=$4 +DELEGATOR_APP_NAME=$5 +DELEGATOR_RG=$6 +DELEGATOR_SUB=$7 +DELEGATOR_BASE_URL=${8:-"http://localhost:8080"} # Default to localhost:8080 if not provided -az account set --subscription "$SUBSCRIPTION_ID" +VNAMES=( "cx_vnet_v1" "cx_vnet_v2" "cx_vnet_v3" "cx_vnet_v4" ) +VCIDRS=( "172.16.0.0/16" "172.17.0.0/16" "172.18.0.0/16" "172.19.0.0/16" ) +NODE_SUBNETS=( "172.16.0.0/24" "172.17.0.0/24" "172.18.0.0/24" "172.19.0.0/24" ) +EXTRA_SUBNETS_LIST=( "s1 s2 pe" "s1" "s1" "s1" ) +EXTRA_CIDRS_LIST=( "172.16.1.0/24,172.16.2.0/24,172.16.3.0/24" \ + "172.17.1.0/24" \ + "172.18.1.0/24" \ + "172.19.1.0/24" ) +az account set --subscription "$SUB_ID" -# VNets and subnets -VNET_A1="cx_vnet_a1" -VNET_A2="cx_vnet_a2" -VNET_A3="cx_vnet_a3" -VNET_B1="cx_vnet_b1" - -A1_S1="10.10.1.0/24" -A1_S2="10.10.2.0/24" -A1_PE="10.10.100.0/24" - -A2_MAIN="10.11.1.0/24" - -A3_MAIN="10.12.1.0/24" - -B1_MAIN="10.20.1.0/24" - -# ------------------------------- -# Verification functions -# ------------------------------- verify_vnet() { - local rg="$1"; local vnet="$2" - echo "==> Verifying VNet: $vnet" - if az network vnet show -g "$rg" -n "$vnet" &>/dev/null; then + local vnet="$1" + echo "Verifying VNet: $vnet" + if az network vnet show -g "$RG" -n "$vnet" &>/dev/null; then echo "[OK] Verified VNet $vnet exists." else echo "[ERROR] VNet $vnet not found!" 
>&2 @@ -39,9 +33,9 @@ verify_vnet() { } verify_subnet() { - local rg="$1"; local vnet="$2"; local subnet="$3" - echo "==> Verifying subnet: $subnet in $vnet" - if az network vnet subnet show -g "$rg" --vnet-name "$vnet" -n "$subnet" &>/dev/null; then + local vnet="$1"; local subnet="$2" + echo "Verifying subnet: $subnet in $vnet" + if az network vnet subnet show -g "$RG" --vnet-name "$vnet" -n "$subnet" &>/dev/null; then echo "[OK] Verified subnet $subnet exists in $vnet." else echo "[ERROR] Subnet $subnet not found in $vnet!" >&2 @@ -49,36 +43,92 @@ verify_subnet() { fi } -# ------------------------------- -# Create VNets and Subnets -# ------------------------------- -# A1 -az network vnet create -g "$RG" -n "$VNET_A1" --address-prefix 10.10.0.0/16 --subnet-name s1 --subnet-prefix "$A1_S1" -l "$LOCATION" --output none \ - && echo "Created $VNET_A1 with subnet s1" -az network vnet subnet create -g "$RG" --vnet-name "$VNET_A1" -n s2 --address-prefix "$A1_S2" --output none \ - && echo "Created $VNET_A1 with subnet s2" -az network vnet subnet create -g "$RG" --vnet-name "$VNET_A1" -n pe --address-prefix "$A1_PE" --output none \ - && echo "Created $VNET_A1 with subnet pe" -# Verify A1 -verify_vnet "$RG" "$VNET_A1" -for sn in s1 s2 pe; do verify_subnet "$RG" "$VNET_A1" "$sn"; done +create_vnet_subets() { + local vnet="$1" + local vnet_cidr="$2" + local node_subnet_cidr="$3" + local extra_subnets="$4" + local extra_cidrs="$5" + + echo "Creating VNet: $vnet with CIDR: $vnet_cidr" + az network vnet create -g "$RG" -l "$LOCATION" --name "$vnet" --address-prefixes "$vnet_cidr" -o none -# A2 -az network vnet create -g "$RG" -n "$VNET_A2" --address-prefix 10.11.0.0/16 --subnet-name s1 --subnet-prefix "$A2_MAIN" -l "$LOCATION" --output none \ - && echo "Created $VNET_A2 with subnet s1" -verify_vnet "$RG" "$VNET_A2" -verify_subnet "$RG" "$VNET_A2" "s1" + IFS=' ' read -r -a extra_subnet_array <<< "$extra_subnets" + IFS=',' read -r -a extra_cidr_array <<< "$extra_cidrs" + + for i 
in "${!extra_subnet_array[@]}"; do + subnet_name="${extra_subnet_array[$i]}" + subnet_cidr="${extra_cidr_array[$i]}" + echo "Creating extra subnet: $subnet_name with CIDR: $subnet_cidr" + + # Only delegate pod subnets (not private endpoint subnets) + if [[ "$subnet_name" != "pe" ]]; then + az network vnet subnet create -g "$RG" \ + --vnet-name "$vnet" --name "$subnet_name" \ + --delegations Microsoft.SubnetDelegator/msfttestclients \ + --address-prefixes "$subnet_cidr" -o none + else + az network vnet subnet create -g "$RG" \ + --vnet-name "$vnet" --name "$subnet_name" \ + --address-prefixes "$subnet_cidr" -o none + fi + done +} + +delegate_subnet() { + local vnet="$1" + local subnet="$2" + local max_attempts=7 + local attempt=1 + + echo "Delegating subnet: $subnet in VNet: $vnet to Subnet Delegator" + subnet_id=$(az network vnet subnet show -g "$RG" --vnet-name "$vnet" -n "$subnet" --query id -o tsv) + modified_custsubnet="${subnet_id//\//%2F}" + + responseFile="delegate_response.txt" + cmd_delegator_curl="'curl -X PUT ${DELEGATOR_BASE_URL}/DelegatedSubnet/$modified_custsubnet'" + cmd_containerapp_exec="az containerapp exec -n $DELEGATOR_APP_NAME -g $DELEGATOR_RG --subscription $DELEGATOR_SUB --command $cmd_delegator_curl" + + while [ $attempt -le $max_attempts ]; do + echo "Attempt $attempt of $max_attempts..." + script --quiet -c "$cmd_containerapp_exec" "$responseFile" + + if grep -qF "success" "$responseFile"; then + echo "Subnet Delegator registered the subnet" + rm -f "$responseFile" + return 0 + else + echo "Subnet Delegator failed to register the subnet (attempt $attempt)" + cat "$responseFile" + if [ $attempt -lt $max_attempts ]; then + echo "Retrying in 5 seconds..." 
+ sleep 5 + fi + fi + + ((attempt++)) + done + + echo "[ERROR] Failed to delegate subnet after $max_attempts attempts" + rm -f "$responseFile" + exit 1 +} -# A3 -az network vnet create -g "$RG" -n "$VNET_A3" --address-prefix 10.12.0.0/16 --subnet-name s1 --subnet-prefix "$A3_MAIN" -l "$LOCATION" --output none \ - && echo "Created $VNET_A3 with subnet s1" -verify_vnet "$RG" "$VNET_A3" -verify_subnet "$RG" "$VNET_A3" "s1" +for i in "${!VNAMES[@]}"; do + VNET=${VNAMES[$i]} + VNET_CIDR=${VCIDRS[$i]} + NODE_SUBNET_CIDR=${NODE_SUBNETS[$i]} + EXTRA_SUBNETS=${EXTRA_SUBNETS_LIST[$i]} + EXTRA_SUBNET_CIDRS=${EXTRA_CIDRS_LIST[$i]} -# B1 -az network vnet create -g "$RG" -n "$VNET_B1" --address-prefix 10.20.0.0/16 --subnet-name s1 --subnet-prefix "$B1_MAIN" -l "$LOCATION" --output none \ - && echo "Created $VNET_B1 with subnet s1" -verify_vnet "$RG" "$VNET_B1" -verify_subnet "$RG" "$VNET_B1" "s1" + create_vnet_subets "$VNET" "$VNET_CIDR" "$NODE_SUBNET_CIDR" "$EXTRA_SUBNETS" "$EXTRA_SUBNET_CIDRS" + verify_vnet "$VNET" + for PODSUBNET in $EXTRA_SUBNETS; do + verify_subnet "$VNET" "$PODSUBNET" + if [[ "$PODSUBNET" != "pe" ]]; then + delegate_subnet "$VNET" "$PODSUBNET" + fi + done +done -echo " All VNets and subnets created and verified successfully." +echo "All VNets and subnets created and verified successfully." \ No newline at end of file diff --git a/.pipelines/swiftv2-long-running/scripts/manage_storage_rbac.sh b/.pipelines/swiftv2-long-running/scripts/manage_storage_rbac.sh new file mode 100644 index 0000000000..f7fbd2d30a --- /dev/null +++ b/.pipelines/swiftv2-long-running/scripts/manage_storage_rbac.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash +set -e + +ACTION=$1 # "assign" or "delete" +SUBSCRIPTION_ID=$2 +RG=$3 +STORAGE_ACCOUNTS=$4 # Space-separated list of storage account names + +if [ "$ACTION" != "assign" ] && [ "$ACTION" != "delete" ]; then + echo "[ERROR] Invalid action. 
Use 'assign' or 'delete'" >&2 + exit 1 +fi + +az account set --subscription "$SUBSCRIPTION_ID" +SP_OBJECT_ID=$(az ad signed-in-user show --query id -o tsv 2>/dev/null || az account show --query user.name -o tsv) + +if [ "$ACTION" == "assign" ]; then + echo "Assigning Storage Blob Data Contributor role to service principal" + for SA in $STORAGE_ACCOUNTS; do + echo "Processing storage account: $SA" + SA_SCOPE="/subscriptions/${SUBSCRIPTION_ID}/resourceGroups/${RG}/providers/Microsoft.Storage/storageAccounts/${SA}" + + EXISTING=$(az role assignment list \ + --assignee "$SP_OBJECT_ID" \ + --role "Storage Blob Data Contributor" \ + --scope "$SA_SCOPE" \ + --query "[].id" -o tsv) + + if [ -n "$EXISTING" ]; then + echo "[OK] Role assignment already exists for $SA" + continue + fi + + az role assignment create \ + --assignee "$SP_OBJECT_ID" \ + --role "Storage Blob Data Contributor" \ + --scope "$SA_SCOPE" \ + --output none \ + && echo "[OK] Role assigned to service principal for $SA" + done + +elif [ "$ACTION" == "delete" ]; then + echo "Removing Storage Blob Data Contributor role from service principal" + + for SA in $STORAGE_ACCOUNTS; do + echo "Processing storage account: $SA" + SA_SCOPE="/subscriptions/${SUBSCRIPTION_ID}/resourceGroups/${RG}/providers/Microsoft.Storage/storageAccounts/${SA}" + + ASSIGNMENT_ID=$(az role assignment list \ + --assignee "$SP_OBJECT_ID" \ + --role "Storage Blob Data Contributor" \ + --scope "$SA_SCOPE" \ + --query "[0].id" -o tsv 2>/dev/null || echo "") + + if [ -z "$ASSIGNMENT_ID" ]; then + echo "[OK] No role assignment found for $SA (already deleted or never existed)" + continue + fi + + az role assignment delete --ids "$ASSIGNMENT_ID" --output none \ + && echo "[OK] Role removed from service principal for $SA" \ + || echo "[WARNING] Failed to remove role for $SA (may not exist)" + done + + echo "==> Performing sanity check to verify RBAC cleanup..." 
+ + for SA in $STORAGE_ACCOUNTS; do + SA_SCOPE="/subscriptions/${SUBSCRIPTION_ID}/resourceGroups/${RG}/providers/Microsoft.Storage/storageAccounts/${SA}" + + REMAINING=$(az role assignment list \ + --assignee "$SP_OBJECT_ID" \ + --role "Storage Blob Data Contributor" \ + --scope "$SA_SCOPE" \ + --query "[].id" -o tsv 2>/dev/null || echo "") + + if [ -n "$REMAINING" ]; then + echo "[ERROR] RBAC leak detected: Role assignment still exists for $SA after deletion!" >&2 + echo "Assignment ID(s): $REMAINING" >&2 + fi + done +fi + +echo "RBAC management completed successfully." diff --git a/.pipelines/swiftv2-long-running/template/long-running-pipeline-template.yaml b/.pipelines/swiftv2-long-running/template/long-running-pipeline-template.yaml index cc6016f17a..356495289c 100644 --- a/.pipelines/swiftv2-long-running/template/long-running-pipeline-template.yaml +++ b/.pipelines/swiftv2-long-running/template/long-running-pipeline-template.yaml @@ -5,136 +5,343 @@ parameters: type: string - name: resourceGroupName type: string + - name: runSetupStages + type: boolean + default: false + +variables: + - name: rgName + ${{ if eq(parameters.runSetupStages, true) }}: + value: ${{ parameters.resourceGroupName }} + ${{ else }}: + value: sv2-long-run-${{ parameters.location }} - name: vmSkuDefault - type: string + value: "Standard_D4s_v3" - name: vmSkuHighNIC - type: string - - name: serviceConnection - type: string + value: "Standard_D16s_v3" stages: - stage: AKSClusterAndNetworking displayName: "Stage: AKS Cluster and Networking Setup" + condition: eq(${{ parameters.runSetupStages }}, true) jobs: - # ------------------------------------------------------------ - # Job 1: Create Resource Group - # ------------------------------------------------------------ - job: CreateResourceGroup displayName: "Create Resource Group" - pool: - vmImage: ubuntu-latest steps: - - checkout: self - task: AzureCLI@2 displayName: "Create resource group" inputs: - azureSubscription: ${{ 
parameters.serviceConnection }} + azureSubscription: $(AZURE_SERVICE_CONNECTION) scriptType: bash scriptLocation: inlineScript inlineScript: | - echo "==> Creating resource group ${{ parameters.resourceGroupName }} in ${{ parameters.location }}" + echo "==> Creating resource group $(rgName) in ${{ parameters.location }}" az group create \ - --name "${{ parameters.resourceGroupName }}" \ + --name "$(rgName)" \ --location "${{ parameters.location }}" \ --subscription "${{ parameters.subscriptionId }}" echo "Resource group created successfully." - # ------------------------------------------------------------ - # Job 2: Create AKS Clusters - # ------------------------------------------------------------ - job: CreateCluster displayName: "Create AKS Clusters" dependsOn: CreateResourceGroup - pool: - vmImage: ubuntu-latest steps: - - checkout: self - task: AzureCLI@2 - displayName: "Run create_aks.sh" + displayName: "Create AKS clusters" inputs: - azureSubscription: ${{ parameters.serviceConnection }} + azureSubscription: $(AZURE_SERVICE_CONNECTION) scriptType: bash scriptLocation: scriptPath scriptPath: ".pipelines/swiftv2-long-running/scripts/create_aks.sh" arguments: > ${{ parameters.subscriptionId }} ${{ parameters.location }} - ${{ parameters.resourceGroupName }} - ${{ parameters.vmSkuDefault }} - ${{ parameters.vmSkuHighNIC }} + $(rgName) + $(vmSkuDefault) + $(vmSkuHighNIC) + $(DELEGATOR_CONTAINER_APP_NAME) + $(DELEGATOR_RESOURCE_GROUP) + $(DELEGATOR_SUBSCRIPTION) - # ------------------------------------------------------------ - # Job 3: Networking & Storage - # ------------------------------------------------------------ - job: NetworkingAndStorage displayName: "Networking and Storage Setup" - dependsOn: CreateResourceGroup - pool: - vmImage: ubuntu-latest + dependsOn: CreateCluster steps: - - checkout: self - - # Task 1: Create VNets - task: AzureCLI@2 displayName: "Create customer vnets" inputs: - azureSubscription: ${{ parameters.serviceConnection }} + 
azureSubscription: $(AZURE_SERVICE_CONNECTION) scriptType: bash scriptLocation: scriptPath scriptPath: ".pipelines/swiftv2-long-running/scripts/create_vnets.sh" arguments: > ${{ parameters.subscriptionId }} ${{ parameters.location }} - ${{ parameters.resourceGroupName }} + $(rgName) + $(Build.BuildId) + $(DELEGATOR_CONTAINER_APP_NAME) + $(DELEGATOR_RESOURCE_GROUP) + $(DELEGATOR_SUBSCRIPTION) - # Task 2: Create Peerings - task: AzureCLI@2 displayName: "Create customer vnet peerings" inputs: - azureSubscription: ${{ parameters.serviceConnection }} + azureSubscription: $(AZURE_SERVICE_CONNECTION) scriptType: bash scriptLocation: scriptPath scriptPath: ".pipelines/swiftv2-long-running/scripts/create_peerings.sh" arguments: > - ${{ parameters.resourceGroupName }} + $(rgName) - # Task 3: Create Storage Accounts - task: AzureCLI@2 name: CreateStorageAccounts displayName: "Create storage accounts" inputs: - azureSubscription: ${{ parameters.serviceConnection }} + azureSubscription: $(AZURE_SERVICE_CONNECTION) scriptType: bash scriptLocation: scriptPath scriptPath: ".pipelines/swiftv2-long-running/scripts/create_storage.sh" arguments: > ${{ parameters.subscriptionId }} ${{ parameters.location }} - ${{ parameters.resourceGroupName }} + $(rgName) - # Task 4: Create NSG + - task: AzureCLI@2 + displayName: "Create Private Endpoint for Storage Account" + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: scriptPath + scriptPath: ".pipelines/swiftv2-long-running/scripts/create_pe.sh" + arguments: > + ${{ parameters.subscriptionId }} + ${{ parameters.location }} + $(rgName) + $(CreateStorageAccounts.StorageAccount1) + - task: AzureCLI@2 displayName: "Create network security groups to restrict access between subnets" inputs: - azureSubscription: ${{ parameters.serviceConnection }} + azureSubscription: $(AZURE_SERVICE_CONNECTION) scriptType: bash scriptLocation: scriptPath scriptPath: ".pipelines/swiftv2-long-running/scripts/create_nsg.sh" 
arguments: > ${{ parameters.subscriptionId }} - ${{ parameters.resourceGroupName }} + $(rgName) ${{ parameters.location }} + - stage: DataPathTests + displayName: "Stage: Swiftv2 Data Path Tests on Linux Managed Nodes" + dependsOn: AKSClusterAndNetworking + condition: or(eq(${{ parameters.runSetupStages }}, false), succeeded()) + variables: + storageAccount1: $[ stageDependencies.AKSClusterAndNetworking.NetworkingAndStorage.outputs['CreateStorageAccounts.StorageAccount1'] ] + storageAccount2: $[ stageDependencies.AKSClusterAndNetworking.NetworkingAndStorage.outputs['CreateStorageAccounts.StorageAccount2'] ] + jobs: + - job: SetupKubeconfig + displayName: "Setup Kubeconfig Files" + steps: + - task: AzureCLI@2 + displayName: "Generate and verify kubeconfig files" + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + mkdir -p $(Pipeline.Workspace)/kubeconfigs + + echo "==> Setting up kubeconfig for cluster aks-1" + az aks get-credentials \ + --resource-group $(rgName) \ + --name aks-1 \ + --file $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig \ + --overwrite-existing \ + --admin + + echo "==> Setting up kubeconfig for cluster aks-2" + az aks get-credentials \ + --resource-group $(rgName) \ + --name aks-2 \ + --file $(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig \ + --overwrite-existing \ + --admin + + echo "==> Verifying cluster aks-1 connectivity" + kubectl --kubeconfig $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig get nodes + + echo "==> Verifying cluster aks-2 connectivity" + kubectl --kubeconfig $(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig get nodes + + - task: PublishPipelineArtifact@1 + displayName: "Publish kubeconfig files" + inputs: + targetPath: $(Pipeline.Workspace)/kubeconfigs + artifactName: kubeconfigs + publishLocation: pipeline + + - job: CreatePods + displayName: "Create Swiftv2 Pods" + dependsOn: SetupKubeconfig + steps: + - task: DownloadPipelineArtifact@2 + 
displayName: "Download kubeconfig files" + inputs: + artifactName: kubeconfigs + targetPath: $(Pipeline.Workspace)/kubeconfigs - # Task 5: Create Private Endpoint - task: AzureCLI@2 - displayName: "Create Private Endpoint for Storage Account" + displayName: "Create pods" + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + echo "==> Creating pods (8 scenarios)" + export RG="$(rgName)" + export BUILD_ID="$(rgName)" + export WORKLOAD_TYPE="swiftv2-linux" + cd ./test/integration/swiftv2/longRunningCluster + go test -v -timeout=1h -tags=create_test + workingDirectory: $(System.DefaultWorkingDirectory) + env: + KUBECONFIG: $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig:$(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig + KUBECONFIG_DIR: $(Pipeline.Workspace)/kubeconfigs + + - job: ConnectivityTests + displayName: "Test Pod-to-Pod Connectivity" + dependsOn: CreatePods + steps: + - task: DownloadPipelineArtifact@2 + displayName: "Download kubeconfig files" + inputs: + artifactName: kubeconfigs + targetPath: $(Pipeline.Workspace)/kubeconfigs + + - task: AzureCLI@2 + displayName: "Run Connectivity Tests" + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + echo "==> Running connectivity tests" + export RG="$(rgName)" + export BUILD_ID="$(rgName)" + export WORKLOAD_TYPE="swiftv2-linux" + cd ./test/integration/swiftv2/longRunningCluster + go test -v -timeout=30m -tags=connectivity_test + workingDirectory: $(System.DefaultWorkingDirectory) + env: + KUBECONFIG: $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig:$(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig + KUBECONFIG_DIR: $(Pipeline.Workspace)/kubeconfigs + + - job: PrivateEndpointTests + displayName: "Test Private Endpoint Access" + dependsOn: ConnectivityTests + steps: + - task: DownloadPipelineArtifact@2 + displayName: "Download kubeconfig files" + inputs: + 
artifactName: kubeconfigs + targetPath: $(Pipeline.Workspace)/kubeconfigs + + - task: AzureCLI@2 + name: DiscoverStorageAccounts + displayName: "Discover storage accounts" + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + STORAGE_ACCOUNT_1="$(storageAccount1)" + STORAGE_ACCOUNT_2="$(storageAccount2)" + + if [ -z "$STORAGE_ACCOUNT_1" ] || [ -z "$STORAGE_ACCOUNT_2" ]; then + echo "Storage account variables not set, discovering from resource group..." + STORAGE_ACCOUNT_1=$(az storage account list -g $(rgName) --query "[?starts_with(name, 'sa1')].name" -o tsv) + STORAGE_ACCOUNT_2=$(az storage account list -g $(rgName) --query "[?starts_with(name, 'sa2')].name" -o tsv) + echo "Discovered: STORAGE_ACCOUNT_1=$STORAGE_ACCOUNT_1, STORAGE_ACCOUNT_2=$STORAGE_ACCOUNT_2" + fi + + echo "##vso[task.setvariable variable=storageAccount1;isOutput=true]$STORAGE_ACCOUNT_1" + echo "##vso[task.setvariable variable=storageAccount2;isOutput=true]$STORAGE_ACCOUNT_2" + + - task: AzureCLI@2 + displayName: "Assign RBAC for SAS token generation" inputs: - azureSubscription: ${{ parameters.serviceConnection }} + azureSubscription: $(AZURE_SERVICE_CONNECTION) scriptType: bash scriptLocation: scriptPath - scriptPath: ".pipelines/swiftv2-long-running/scripts/create_pe.sh" + scriptPath: ".pipelines/swiftv2-long-running/scripts/manage_storage_rbac.sh" arguments: > + assign ${{ parameters.subscriptionId }} - ${{ parameters.location }} - ${{ parameters.resourceGroupName }} - $(CreateStorageAccounts.StorageAccount1) + $(rgName) + "$(DiscoverStorageAccounts.storageAccount1) $(DiscoverStorageAccounts.storageAccount2)" + + - task: AzureCLI@2 + displayName: "Run Private Endpoint Tests" + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + echo "==> Running Private Endpoint connectivity tests" + export RG="$(rgName)" + export BUILD_ID="$(rgName)" + export 
WORKLOAD_TYPE="swiftv2-linux" + export STORAGE_ACCOUNT_1="$(DiscoverStorageAccounts.storageAccount1)" + export STORAGE_ACCOUNT_2="$(DiscoverStorageAccounts.storageAccount2)" + cd ./test/integration/swiftv2/longRunningCluster + go test -v -timeout=30m -tags=private_endpoint_test + workingDirectory: $(System.DefaultWorkingDirectory) + env: + KUBECONFIG: $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig:$(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig + KUBECONFIG_DIR: $(Pipeline.Workspace)/kubeconfigs + + - task: AzureCLI@2 + displayName: "Remove RBAC after tests" + condition: always() + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: scriptPath + scriptPath: ".pipelines/swiftv2-long-running/scripts/manage_storage_rbac.sh" + arguments: > + delete + ${{ parameters.subscriptionId }} + $(rgName) + "$(DiscoverStorageAccounts.storageAccount1) $(DiscoverStorageAccounts.storageAccount2)" + + - job: DeleteTestResources + displayName: "Delete PodNetwork, PNI, and Pods" + dependsOn: + - CreatePods + - ConnectivityTests + - PrivateEndpointTests + condition: always() + steps: + - task: DownloadPipelineArtifact@2 + displayName: "Download kubeconfig files" + inputs: + artifactName: kubeconfigs + targetPath: $(Pipeline.Workspace)/kubeconfigs + + - task: AzureCLI@2 + displayName: "Delete Test Resources" + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + echo "==> Deleting test resources (8 scenarios)" + export RG="$(rgName)" + export BUILD_ID="$(rgName)" + export WORKLOAD_TYPE="swiftv2-linux" + cd ./test/integration/swiftv2/longRunningCluster + go test -v -timeout=1h -tags=delete_test + workingDirectory: $(System.DefaultWorkingDirectory) + env: + KUBECONFIG: $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig:$(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig + KUBECONFIG_DIR: $(Pipeline.Workspace)/kubeconfigs + \ No newline at end of file diff --git 
a/hack/aks/Makefile b/hack/aks/Makefile index 5e1c8f3f9b..3b31345ec5 100644 --- a/hack/aks/Makefile +++ b/hack/aks/Makefile @@ -29,6 +29,7 @@ PUBLIC_IPv6 ?= $(PUBLIC_IP_ID)/$(IP_PREFIX)-$(CLUSTER)-v6 KUBE_PROXY_JSON_PATH ?= ./kube-proxy.json LTS ?= false + # overrideable variables SUB ?= $(AZURE_SUBSCRIPTION) CLUSTER ?= $(USER)-$(REGION) @@ -280,22 +281,22 @@ swiftv2-dummy-cluster-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT AzCNI clus --network-plugin azure \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \ + --tags stampcreatorserviceinfo=true \ --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --no-ssh-key \ --yes @$(MAKE) set-kubeconf swiftv2-podsubnet-cluster-up: ipv4 swift-net-up ## Bring up a SWIFTv2 PodSubnet cluster - $(COMMON_AKS_FIELDS) + $(COMMON_AKS_FIELDS) \ --network-plugin azure \ - --nodepool-name nodepool1 \ - --load-balancer-outbound-ips $(PUBLIC_IPv4) \ + --node-vm-size $(VM_SIZE) \ --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \ - --service-cidr "10.0.0.0/16" \ - --dns-service-ip "10.0.0.10" \ - --tags fastpathenabled=true RGOwner=LongRunningTestPipelines stampcreatorserviceinfo=true \ + --nodepool-tags fastpathenabled=true aks-nic-enable-multi-tenancy=true \ + --tags stampcreatorserviceinfo=true \ --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/NetworkingMultiTenancyPreview \ + --load-balancer-outbound-ips $(PUBLIC_IPv4) \ --yes @$(MAKE) set-kubeconf @@ -446,7 +447,7 @@ linux-swiftv2-nodepool-up: ## Add linux node pool to swiftv2 cluster --os-type Linux \ --max-pods 250 \ --subscription $(SUB) \ - 
--tags fastpathenabled=true,aks-nic-enable-multi-tenancy=true \ + --tags fastpathenabled=true aks-nic-enable-multi-tenancy=true stampcreatorserviceinfo=true\ --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/NetworkingMultiTenancyPreview \ --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet diff --git a/test/integration/manifests/swiftv2/long-running-cluster/pod.yaml b/test/integration/manifests/swiftv2/long-running-cluster/pod.yaml new file mode 100644 index 0000000000..28b422d0d6 --- /dev/null +++ b/test/integration/manifests/swiftv2/long-running-cluster/pod.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ .PodName }} + namespace: {{ .Namespace }} + labels: + kubernetes.azure.com/pod-network-instance: {{ .PNIName }} + kubernetes.azure.com/pod-network: {{ .PNName }} +spec: + nodeSelector: + kubernetes.io/hostname: {{ .NodeName }} + containers: + - name: net-debugger + image: {{ .Image }} + command: ["/bin/bash", "-c"] + args: + - | + echo "Pod Network Diagnostics started on $(hostname)" + echo "Pod IP: $(hostname -i)" + echo "Starting TCP listener on port 8080" + + # Start netcat listener that responds to connections + while true; do + echo "TCP Connection Success from $(hostname) at $(date)" | nc -l -p 8080 + done + ports: + - containerPort: 8080 + protocol: TCP + resources: + limits: + cpu: 300m + memory: 600Mi + requests: + cpu: 300m + memory: 600Mi + securityContext: + privileged: true + restartPolicy: Always diff --git a/test/integration/manifests/swiftv2/long-running-cluster/podnetwork.yaml b/test/integration/manifests/swiftv2/long-running-cluster/podnetwork.yaml new file mode 100644 index 0000000000..25a7491d90 --- /dev/null +++ b/test/integration/manifests/swiftv2/long-running-cluster/podnetwork.yaml @@ -0,0 +1,15 @@ +apiVersion: multitenancy.acn.azure.com/v1alpha1 +kind: PodNetwork +metadata: + name: {{ .PNName }} +{{- if .SubnetToken }} 
+ labels: + kubernetes.azure.com/override-subnet-token: "{{ .SubnetToken }}" +{{- end }} +spec: + networkID: "{{ .VnetGUID }}" +{{- if not .SubnetToken }} + subnetGUID: "{{ .SubnetGUID }}" +{{- end }} + subnetResourceID: "{{ .SubnetARMID }}" + deviceType: acn.azure.com/vnet-nic diff --git a/test/integration/manifests/swiftv2/long-running-cluster/podnetworkinstance.yaml b/test/integration/manifests/swiftv2/long-running-cluster/podnetworkinstance.yaml new file mode 100644 index 0000000000..4d1f8ca384 --- /dev/null +++ b/test/integration/manifests/swiftv2/long-running-cluster/podnetworkinstance.yaml @@ -0,0 +1,13 @@ +apiVersion: multitenancy.acn.azure.com/v1alpha1 +kind: PodNetworkInstance +metadata: + name: {{ .PNIName }} + namespace: {{ .Namespace }} +spec: + podNetworkConfigs: + - podNetwork: {{ .PNName }} + {{- if eq .Type "explicit" }} + podIPReservationSize: {{ .Reservations }} + {{- else }} + podIPReservationSize: 1 + {{- end }} diff --git a/test/integration/swiftv2/helpers/az_helpers.go b/test/integration/swiftv2/helpers/az_helpers.go new file mode 100644 index 0000000000..484e66b138 --- /dev/null +++ b/test/integration/swiftv2/helpers/az_helpers.go @@ -0,0 +1,405 @@ +package helpers + +import ( + "context" + "errors" + "fmt" + "os/exec" + "strings" + "time" +) + +var ( + // ErrPodNotRunning is returned when a pod does not reach Running state + ErrPodNotRunning = errors.New("pod did not reach Running state") + // ErrPodNoIP is returned when a pod has no IP address assigned + ErrPodNoIP = errors.New("pod has no IP address assigned") + // ErrPodNoEth1IP is returned when a pod has no eth1 IP address (delegated subnet not configured) + ErrPodNoEth1IP = errors.New("pod has no eth1 IP address (delegated subnet not configured?)") + // ErrPodContainerNotReady is returned when a pod container is not ready + ErrPodContainerNotReady = errors.New("pod container not ready") + // ErrMTPNCStuckDeletion is returned when MTPNC resources are stuck and not deleted + 
ErrMTPNCStuckDeletion = errors.New("MTPNC resources should have been deleted but were found") +) + +func runAzCommand(cmd string, args ...string) (string, error) { + out, err := exec.Command(cmd, args...).CombinedOutput() + if err != nil { + return "", fmt.Errorf("failed to run %s %v: %w\nOutput: %s", cmd, args, err, string(out)) + } + return strings.TrimSpace(string(out)), nil +} + +func GetVnetGUID(rg, vnet string) (string, error) { + return runAzCommand("az", "network", "vnet", "show", "--resource-group", rg, "--name", vnet, "--query", "resourceGuid", "-o", "tsv") +} + +func GetSubnetARMID(rg, vnet, subnet string) (string, error) { + return runAzCommand("az", "network", "vnet", "subnet", "show", "--resource-group", rg, "--vnet-name", vnet, "--name", subnet, "--query", "id", "-o", "tsv") +} + +func GetSubnetGUID(rg, vnet, subnet string) (string, error) { + subnetID, err := GetSubnetARMID(rg, vnet, subnet) + if err != nil { + return "", err + } + return runAzCommand("az", "resource", "show", "--ids", subnetID, "--api-version", "2023-09-01", "--query", "properties.serviceAssociationLinks[0].properties.subnetId", "-o", "tsv") +} + +// GetClusterNodes returns a slice of node names from a cluster using the given kubeconfig +func GetClusterNodes(kubeconfig string) ([]string, error) { + cmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "get", "nodes", "-o", "name") + out, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("failed to get nodes using kubeconfig %s: %w\nOutput: %s", kubeconfig, err, string(out)) + } + + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + nodes := make([]string, 0, len(lines)) + + for _, line := range lines { + // kubectl returns "node/", we strip the prefix + if strings.HasPrefix(line, "node/") { + nodes = append(nodes, strings.TrimPrefix(line, "node/")) + } + } + return nodes, nil +} + +// EnsureNamespaceExists checks if a namespace exists and creates it if it doesn't +func 
EnsureNamespaceExists(kubeconfig, namespace string) error { + cmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "get", "namespace", namespace) + err := cmd.Run() + + if err == nil { + return nil // Namespace exists + } + + // Namespace doesn't exist, create it + cmd = exec.Command("kubectl", "--kubeconfig", kubeconfig, "create", "namespace", namespace) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to create namespace %s: %w\nOutput: %s", namespace, err, string(out)) + } + + return nil +} + +// DeletePod deletes a pod in the specified namespace and waits for it to be fully removed +func DeletePod(kubeconfig, namespace, podName string) error { + fmt.Printf("Deleting pod %s in namespace %s...\n", podName, namespace) + + // Initiate pod deletion with context timeout + ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second) + defer cancel() + + cmd := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeconfig, "delete", "pod", podName, "-n", namespace, "--ignore-not-found=true") + out, err := cmd.CombinedOutput() + if err != nil { + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + fmt.Printf("kubectl delete pod command timed out after 90s, attempting force delete...\n") + } else { + return fmt.Errorf("failed to delete pod %s in namespace %s: %w\nOutput: %s", podName, namespace, err, string(out)) + } + } + + // Wait for pod to be completely gone (critical for IP release) + fmt.Printf("Waiting for pod %s to be fully removed...\n", podName) + for attempt := 1; attempt <= 30; attempt++ { + checkCtx, checkCancel := context.WithTimeout(context.Background(), 10*time.Second) + checkCmd := exec.CommandContext(checkCtx, "kubectl", "--kubeconfig", kubeconfig, "get", "pod", podName, "-n", namespace, "--ignore-not-found=true", "-o", "name") + checkOut, _ := checkCmd.CombinedOutput() + checkCancel() + + if strings.TrimSpace(string(checkOut)) == "" { + fmt.Printf("Pod %s fully removed after %d seconds\n", podName, 
attempt*2) + // Extra wait to ensure IP reservation is released in DNC + time.Sleep(5 * time.Second) + return nil + } + + if attempt%5 == 0 { + fmt.Printf("Pod %s still terminating (attempt %d/30)...\n", podName, attempt) + } + time.Sleep(2 * time.Second) + } + + // If pod still exists after 60 seconds, force delete + fmt.Printf("Pod %s still exists after 60s, attempting force delete...\n", podName) + ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + forceCmd := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeconfig, "delete", "pod", podName, "-n", namespace, "--grace-period=0", "--force", "--ignore-not-found=true") + forceOut, forceErr := forceCmd.CombinedOutput() + if forceErr != nil { + fmt.Printf("Warning: Force delete failed: %s\n%s\n", forceErr, string(forceOut)) + } + + // Wait a bit more for force delete to complete + time.Sleep(10 * time.Second) + fmt.Printf("Pod %s deletion completed (may have required force)\n", podName) + return nil +} + +// DeletePodNetworkInstance deletes a PodNetworkInstance and waits for it to be removed +func DeletePodNetworkInstance(kubeconfig, namespace, pniName string) error { + fmt.Printf("Deleting PodNetworkInstance %s in namespace %s...\n", pniName, namespace) + + // Initiate PNI deletion + cmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "delete", "podnetworkinstance", pniName, "-n", namespace, "--ignore-not-found=true") + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to delete PodNetworkInstance %s: %w\nOutput: %s", pniName, err, string(out)) + } + + // Wait for PNI to be completely gone (it may take time for DNC to release reservations) + fmt.Printf("Waiting for PodNetworkInstance %s to be fully removed...\n", pniName) + for attempt := 1; attempt <= 60; attempt++ { + checkCmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "get", "podnetworkinstance", pniName, "-n", namespace, "--ignore-not-found=true", "-o", "name") + 
checkOut, _ := checkCmd.CombinedOutput() + + if strings.TrimSpace(string(checkOut)) == "" { + fmt.Printf("PodNetworkInstance %s fully removed after %d seconds\n", pniName, attempt*2) + return nil + } + + if attempt%10 == 0 { + // Check for ReservationInUse errors + descCmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "describe", "podnetworkinstance", pniName, "-n", namespace) + descOut, _ := descCmd.CombinedOutput() + descStr := string(descOut) + + if strings.Contains(descStr, "ReservationInUse") { + fmt.Printf("PNI %s still has active reservations (attempt %d/60). Waiting for DNC to release...\n", pniName, attempt) + } else { + fmt.Printf("PNI %s still terminating (attempt %d/60)...\n", pniName, attempt) + } + } + time.Sleep(2 * time.Second) + } + + // If PNI still exists after 120 seconds, try to remove finalizers + fmt.Printf("PNI %s still exists after 120s, attempting to remove finalizers...\n", pniName) + patchCmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "patch", "podnetworkinstance", pniName, "-n", namespace, "-p", `{"metadata":{"finalizers":[]}}`, "--type=merge") + patchOut, patchErr := patchCmd.CombinedOutput() + if patchErr != nil { + fmt.Printf("Warning: Failed to remove finalizers: %s\n%s\n", patchErr, string(patchOut)) + } else { + fmt.Printf("Finalizers removed, waiting for deletion...\n") + time.Sleep(5 * time.Second) + } + + fmt.Printf("PodNetworkInstance %s deletion completed\n", pniName) + return nil +} + +// DeletePodNetwork deletes a PodNetwork and waits for it to be removed +func DeletePodNetwork(kubeconfig, pnName string) error { + fmt.Printf("Deleting PodNetwork %s...\n", pnName) + + cmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "delete", "podnetwork", pnName, "--ignore-not-found=true") + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to delete PodNetwork %s: %w\nOutput: %s", pnName, err, string(out)) + } + + // Wait for PN to be completely gone + fmt.Printf("Waiting for 
// DeleteNamespace deletes a namespace and waits for it to be removed
func DeleteNamespace(kubeconfig, namespace string) error {
	fmt.Printf("Deleting namespace %s...\n", namespace)

	// Issue the delete; --ignore-not-found keeps reruns idempotent.
	rmOut, rmErr := exec.Command("kubectl", "--kubeconfig", kubeconfig, "delete", "namespace", namespace, "--ignore-not-found=true").CombinedOutput()
	if rmErr != nil {
		return fmt.Errorf("failed to delete namespace %s: %w\nOutput: %s", namespace, rmErr, string(rmOut))
	}

	// Poll for up to 120s (60 attempts x 2s) until the namespace is gone.
	fmt.Printf("Waiting for namespace %s to be fully removed...\n", namespace)
	for attempt := 1; attempt <= 60; attempt++ {
		probe, _ := exec.Command("kubectl", "--kubeconfig", kubeconfig, "get", "namespace", namespace, "--ignore-not-found=true", "-o", "name").CombinedOutput()
		if strings.TrimSpace(string(probe)) == "" {
			fmt.Printf("Namespace %s fully removed after %d seconds\n", namespace, attempt*2)
			return nil
		}
		if attempt%15 == 0 {
			fmt.Printf("Namespace %s still terminating (attempt %d/60)...\n", namespace, attempt)
		}
		time.Sleep(2 * time.Second)
	}

	// Last resort: clear finalizers so the namespace can terminate.
	fmt.Printf("Namespace %s still exists, attempting to remove finalizers...\n", namespace)
	patchOut, patchErr := exec.Command("kubectl", "--kubeconfig", kubeconfig, "patch", "namespace", namespace, "-p", `{"metadata":{"finalizers":[]}}`, "--type=merge").CombinedOutput()
	if patchErr != nil {
		fmt.Printf("Warning: Failed to remove finalizers: %s\n%s\n", patchErr, string(patchOut))
	}

	time.Sleep(5 * time.Second)
	fmt.Printf("Namespace %s deletion completed\n", namespace)
	return nil
}
Waiting %d seconds...\n", + podName, attempt, maxRetries, strings.TrimSpace(string(out)), sleepSeconds) + time.Sleep(time.Duration(sleepSeconds) * time.Second) + } + } + + return fmt.Errorf("%w: pod %s after %d attempts", ErrPodNotRunning, podName, maxRetries) +} + +// GetPodIP retrieves the IP address of a pod +func GetPodIP(kubeconfig, namespace, podName string) (string, error) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + cmd := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeconfig, "get", "pod", podName, + "-n", namespace, "-o", "jsonpath={.status.podIP}") + out, err := cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("failed to get pod IP for %s in namespace %s: %w\nOutput: %s", podName, namespace, err, string(out)) + } + + ip := strings.TrimSpace(string(out)) + if ip == "" { + return "", fmt.Errorf("%w: pod %s in namespace %s", ErrPodNoIP, podName, namespace) + } + + return ip, nil +} + +// GetPodDelegatedIP retrieves the eth1 IP address (delegated subnet IP) of a pod +// This is the IP used for cross-VNet communication and is subject to NSG rules +func GetPodDelegatedIP(kubeconfig, namespace, podName string) (string, error) { + // Retry logic - pod might be Running but container not ready yet, or network interface still initializing + maxRetries := 5 + for attempt := 1; attempt <= maxRetries; attempt++ { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + // Get eth1 IP address by running 'ip addr show eth1' in the pod + cmd := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeconfig, "exec", podName, + "-n", namespace, "-c", "net-debugger", "--", "sh", "-c", "ip -4 addr show eth1 | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1") + out, err := cmd.CombinedOutput() + cancel() + + if err == nil { + ip := strings.TrimSpace(string(out)) + if ip != "" { + return ip, nil + } + return "", fmt.Errorf("%w: pod %s in namespace %s", ErrPodNoEth1IP, podName, 
// ExecInPod executes a command in a pod and returns the output
func ExecInPod(kubeconfig, namespace, podName, command string) (string, error) {
	// A 20s cap keeps a hung exec from stalling the whole suite.
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()

	execCmd := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeconfig, "exec", podName,
		"-n", namespace, "--", "sh", "-c", command)
	raw, err := execCmd.CombinedOutput()
	if err != nil {
		// Return the partial output too; callers use it for diagnostics.
		return string(raw), fmt.Errorf("failed to exec in pod %s in namespace %s: %w", podName, namespace, err)
	}
	return string(raw), nil
}
string(out)) + } + + // Parse JSON to check for any MTPNC resources matching our build ID + output := string(out) + if strings.Contains(output, buildID) { + // Extract MTPNC names for better error reporting + lines := strings.Split(output, "\n") + var mtpncNames []string + for _, line := range lines { + if strings.Contains(line, buildID) && strings.Contains(line, "\"name\":") { + // Basic extraction - could be improved with proper JSON parsing + mtpncNames = append(mtpncNames, line) + } + } + + if len(mtpncNames) > 0 { + return fmt.Errorf("%w: found %d MTPNC resources with build ID '%s'", ErrMTPNCStuckDeletion, len(mtpncNames), buildID) + } + } + + return nil +} diff --git a/test/integration/swiftv2/longRunningCluster/datapath.go b/test/integration/swiftv2/longRunningCluster/datapath.go new file mode 100644 index 0000000000..67836a74cb --- /dev/null +++ b/test/integration/swiftv2/longRunningCluster/datapath.go @@ -0,0 +1,766 @@ +package longrunningcluster + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + "os/exec" + "strings" + "text/template" + "time" + + "github.com/Azure/azure-container-networking/test/integration/swiftv2/helpers" +) + +var ( + ErrNoLowNICNodes = errors.New("no low-NIC nodes available") + ErrNoHighNICNodes = errors.New("no high-NIC nodes available") + ErrAllLowNICNodesInUse = errors.New("all low-NIC nodes already in use") + ErrAllHighNICNodesInUse = errors.New("all high-NIC nodes already in use") + ErrFailedToGenerateSASToken = errors.New("failed to generate SAS token") + ErrSASTokenEmpty = errors.New("generated SAS token is empty") + ErrSASTokenInvalid = errors.New("generated SAS token appears invalid") + ErrPodNotRunning = errors.New("pod is not running") + ErrHTTPAuthError = errors.New("HTTP authentication error from private endpoint") + ErrBlobNotFound = errors.New("blob not found (404) on private endpoint") + ErrUnexpectedBlobResponse = errors.New("unexpected response from blob download (no 'Hello' or '200 OK' found)") + 
// getKubeconfigPath resolves the kubeconfig file path for a cluster,
// honoring KUBECONFIG_DIR and defaulting to /tmp.
func getKubeconfigPath(clusterName string) string {
	dir := os.Getenv("KUBECONFIG_DIR")
	if dir == "" {
		dir = "/tmp"
	}
	return fmt.Sprintf("%s/%s.kubeconfig", dir, clusterName)
}

// applyTemplate renders templatePath with data and pipes the manifest to
// `kubectl apply -f -` against the given kubeconfig.
func applyTemplate(templatePath string, data interface{}, kubeconfig string) error {
	t, err := template.ParseFiles(templatePath)
	if err != nil {
		return fmt.Errorf("failed to parse template: %w", err)
	}

	var rendered bytes.Buffer
	if err = t.Execute(&rendered, data); err != nil {
		return fmt.Errorf("failed to execute template: %w", err)
	}

	apply := exec.Command("kubectl", "--kubeconfig", kubeconfig, "apply", "-f", "-")
	apply.Stdin = &rendered
	if out, applyErr := apply.CombinedOutput(); applyErr != nil {
		return fmt.Errorf("kubectl apply failed: %w\nOutput: %s", applyErr, string(out))
	}
	return nil
}

// PodNetworkData holds the template values for a PodNetwork manifest.
type PodNetworkData struct {
	PNName      string
	VnetGUID    string
	SubnetGUID  string
	SubnetARMID string
	SubnetToken string
}

// CreatePodNetwork renders and applies the PodNetwork template.
func CreatePodNetwork(kubeconfig string, data PodNetworkData, templatePath string) error {
	return applyTemplate(templatePath, data, kubeconfig)
}

// PNIData holds the template values for a PodNetworkInstance manifest.
type PNIData struct {
	PNIName      string
	PNName       string
	Namespace    string
	Type         string
	Reservations int
}

// CreatePodNetworkInstance renders and applies the PodNetworkInstance template.
func CreatePodNetworkInstance(kubeconfig string, data PNIData, templatePath string) error {
	return applyTemplate(templatePath, data, kubeconfig)
}

// PodData holds the template values for a Pod manifest.
type PodData struct {
	PodName   string
	NodeName  string
	OS        string
	PNName    string
	PNIName   string
	Namespace string
	Image     string
}

// CreatePod renders and applies the Pod template.
func CreatePod(kubeconfig string, data PodData, templatePath string) error {
	return applyTemplate(templatePath, data, kubeconfig)
}

// TestResources bundles everything needed to stand up one PN/PNI/pod set.
type TestResources struct {
	Kubeconfig         string
	PNName             string
	PNIName            string
	VnetGUID           string
	SubnetGUID         string
	SubnetARMID        string
	SubnetToken        string
	PodNetworkTemplate string
	PNITemplate        string
	PodTemplate        string
	PodImage           string
}

// PodScenario describes a single pod placement to exercise.
type PodScenario struct {
	Name          string // Descriptive name for the scenario
	Cluster       string // "aks-1" or "aks-2"
	VnetName      string // e.g., "cx_vnet_v1", "cx_vnet_v4"
	SubnetName    string // e.g., "s1", "s2"
	NodeSelector  string // "low-nic" or "high-nic"
	PodNameSuffix string // Unique suffix for pod name
}

// TestScenarios is the full matrix of scenarios plus shared lookup state.
type TestScenarios struct {
	ResourceGroup   string
	BuildID         string
	PodImage        string
	Scenarios       []PodScenario
	VnetSubnetCache map[string]VnetSubnetInfo
	UsedNodes       map[string]bool
}

// VnetSubnetInfo caches the Azure identifiers for one vnet/subnet pair.
type VnetSubnetInfo struct {
	VnetGUID    string
	SubnetGUID  string
	SubnetARMID string
	SubnetToken string
}

// isValidWorkloadType reports whether workloadType is one of the
// supported WORKLOAD_TYPE values.
func isValidWorkloadType(workloadType string) bool {
	switch workloadType {
	case "swiftv2-linux", "swiftv2-windows", "swiftv2-linux-byocni", "swiftv2-windows-byocni":
		return true
	}
	return false
}

// NodePoolInfo groups node names by their NIC capacity label.
type NodePoolInfo struct {
	LowNicNodes  []string
	HighNicNodes []string
}
:= strings.Split(strings.TrimSpace(string(out)), "\n") + for _, line := range lines { + if strings.HasPrefix(line, "node/") { + nodeInfo.LowNicNodes = append(nodeInfo.LowNicNodes, strings.TrimPrefix(line, "node/")) + } + } + + cmd = exec.Command("kubectl", "--kubeconfig", kubeconfig, "get", "nodes", + "-l", highNicLabelSelector, "-o", "name") + out, err = cmd.CombinedOutput() + if err != nil { + return NodePoolInfo{}, fmt.Errorf("failed to get high-nic nodes: %w\nOutput: %s", err, string(out)) + } + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + for _, line := range lines { + if line != "" && strings.HasPrefix(line, "node/") { + nodeInfo.HighNicNodes = append(nodeInfo.HighNicNodes, strings.TrimPrefix(line, "node/")) + } + } + + fmt.Printf("Found %d low-nic nodes and %d high-nic nodes with workload-type=%s\n", + len(nodeInfo.LowNicNodes), len(nodeInfo.HighNicNodes), workloadType) + + return nodeInfo, nil +} + +func CreatePodNetworkResource(resources TestResources) error { + err := CreatePodNetwork(resources.Kubeconfig, PodNetworkData{ + PNName: resources.PNName, + VnetGUID: resources.VnetGUID, + SubnetGUID: resources.SubnetGUID, + SubnetARMID: resources.SubnetARMID, + SubnetToken: resources.SubnetToken, + }, resources.PodNetworkTemplate) + if err != nil { + return fmt.Errorf("failed to create PodNetwork: %w", err) + } + return nil +} + +func CreateNamespaceResource(kubeconfig, namespace string) error { + err := helpers.EnsureNamespaceExists(kubeconfig, namespace) + if err != nil { + return fmt.Errorf("failed to create namespace: %w", err) + } + return nil +} + +func CreatePodNetworkInstanceResource(resources TestResources) error { + err := CreatePodNetworkInstance(resources.Kubeconfig, PNIData{ + PNIName: resources.PNIName, + PNName: resources.PNName, + Namespace: resources.PNName, + Type: "explicit", + Reservations: 2, + }, resources.PNITemplate) + if err != nil { + return fmt.Errorf("failed to create PodNetworkInstance: %w", err) + } + return nil 
+} + +func CreatePodResource(resources TestResources, podName, nodeName string) error { + err := CreatePod(resources.Kubeconfig, PodData{ + PodName: podName, + NodeName: nodeName, + OS: "linux", + PNName: resources.PNName, + PNIName: resources.PNIName, + Namespace: resources.PNName, + Image: resources.PodImage, + }, resources.PodTemplate) + if err != nil { + return fmt.Errorf("failed to create pod %s: %w", podName, err) + } + + err = helpers.WaitForPodRunning(resources.Kubeconfig, resources.PNName, podName, 10, 30) + if err != nil { + return fmt.Errorf("pod %s did not reach running state: %w", podName, err) + } + + return nil +} + +func GetOrFetchVnetSubnetInfo(rg, vnetName, subnetName string, cache map[string]VnetSubnetInfo) (VnetSubnetInfo, error) { + key := fmt.Sprintf("%s/%s", vnetName, subnetName) + + if info, exists := cache[key]; exists { + return info, nil + } + + vnetGUID, err := helpers.GetVnetGUID(rg, vnetName) + if err != nil { + return VnetSubnetInfo{}, fmt.Errorf("failed to get VNet GUID: %w", err) + } + + subnetGUID, err := helpers.GetSubnetGUID(rg, vnetName, subnetName) + if err != nil { + return VnetSubnetInfo{}, fmt.Errorf("failed to get Subnet GUID: %w", err) + } + + subnetARMID, err := helpers.GetSubnetARMID(rg, vnetName, subnetName) + if err != nil { + return VnetSubnetInfo{}, fmt.Errorf("failed to get Subnet ARM ID: %w", err) + } + + info := VnetSubnetInfo{ + VnetGUID: vnetGUID, + SubnetGUID: subnetGUID, + SubnetARMID: subnetARMID, + SubnetToken: "", + } + + cache[key] = info + return info, nil +} + +func CreateScenarioResources(scenario PodScenario, testScenarios TestScenarios) error { + kubeconfig := getKubeconfigPath(scenario.Cluster) + netInfo, err := GetOrFetchVnetSubnetInfo(testScenarios.ResourceGroup, scenario.VnetName, scenario.SubnetName, testScenarios.VnetSubnetCache) + if err != nil { + return fmt.Errorf("failed to get network info for %s/%s: %w", scenario.VnetName, scenario.SubnetName, err) + } + + vnetShort := 
strings.TrimPrefix(scenario.VnetName, "cx_vnet_") + vnetShort = strings.ReplaceAll(vnetShort, "_", "-") + subnetNameSafe := strings.ReplaceAll(scenario.SubnetName, "_", "-") + pnName := fmt.Sprintf("pn-%s-%s-%s", testScenarios.BuildID, vnetShort, subnetNameSafe) + pniName := fmt.Sprintf("pni-%s-%s-%s", testScenarios.BuildID, vnetShort, subnetNameSafe) + + resources := TestResources{ + Kubeconfig: kubeconfig, + PNName: pnName, + PNIName: pniName, + VnetGUID: netInfo.VnetGUID, + SubnetGUID: netInfo.SubnetGUID, + SubnetARMID: netInfo.SubnetARMID, + SubnetToken: netInfo.SubnetToken, + PodNetworkTemplate: "../../manifests/swiftv2/long-running-cluster/podnetwork.yaml", + PNITemplate: "../../manifests/swiftv2/long-running-cluster/podnetworkinstance.yaml", + PodTemplate: "../../manifests/swiftv2/long-running-cluster/pod.yaml", + PodImage: testScenarios.PodImage, + } + + // Step 1: Create PodNetwork + err = CreatePodNetworkResource(resources) + if err != nil { + return fmt.Errorf("scenario %s: %w", scenario.Name, err) + } + + // Step 2: Create namespace + err = CreateNamespaceResource(resources.Kubeconfig, resources.PNName) + if err != nil { + return fmt.Errorf("scenario %s: %w", scenario.Name, err) + } + + // Step 3: Create PodNetworkInstance + err = CreatePodNetworkInstanceResource(resources) + if err != nil { + return fmt.Errorf("scenario %s: %w", scenario.Name, err) + } + + // Step 4: Get nodes by NIC count + nodeInfo, err := GetNodesByNicCount(kubeconfig) + if err != nil { + return fmt.Errorf("scenario %s: failed to get nodes: %w", scenario.Name, err) + } + + // Step 5: Select appropriate node based on scenario + var targetNode string + + if testScenarios.UsedNodes == nil { + testScenarios.UsedNodes = make(map[string]bool) + } + + if scenario.NodeSelector == "low-nic" { + if len(nodeInfo.LowNicNodes) == 0 { + return fmt.Errorf("%w: scenario %s", ErrNoLowNICNodes, scenario.Name) + } + // Find first unused node in the pool (low-NIC nodes can only handle one pod) + 
targetNode = "" + for _, node := range nodeInfo.LowNicNodes { + if !testScenarios.UsedNodes[node] { + targetNode = node + testScenarios.UsedNodes[node] = true + break + } + } + if targetNode == "" { + return fmt.Errorf("%w: scenario %s", ErrAllLowNICNodesInUse, scenario.Name) + } + } else { + if len(nodeInfo.HighNicNodes) == 0 { + return fmt.Errorf("%w: scenario %s", ErrNoHighNICNodes, scenario.Name) + } + targetNode = "" + for _, node := range nodeInfo.HighNicNodes { + if !testScenarios.UsedNodes[node] { + targetNode = node + testScenarios.UsedNodes[node] = true + break + } + } + if targetNode == "" { + return fmt.Errorf("%w: scenario %s", ErrAllHighNICNodesInUse, scenario.Name) + } + } + + // Step 6: Create pod + podName := "pod-" + scenario.PodNameSuffix + err = CreatePodResource(resources, podName, targetNode) + if err != nil { + return fmt.Errorf("scenario %s: %w", scenario.Name, err) + } + + fmt.Printf("Successfully created scenario: %s (pod: %s on node: %s)\n", scenario.Name, podName, targetNode) + return nil +} + +func DeleteScenarioResources(scenario PodScenario, buildID string) error { + kubeconfig := getKubeconfigPath(scenario.Cluster) + + vnetShort := strings.TrimPrefix(scenario.VnetName, "cx_vnet_") + vnetShort = strings.ReplaceAll(vnetShort, "_", "-") + subnetNameSafe := strings.ReplaceAll(scenario.SubnetName, "_", "-") + pnName := fmt.Sprintf("pn-%s-%s-%s", buildID, vnetShort, subnetNameSafe) + pniName := fmt.Sprintf("pni-%s-%s-%s", buildID, vnetShort, subnetNameSafe) + podName := "pod-" + scenario.PodNameSuffix + + err := helpers.DeletePod(kubeconfig, pnName, podName) + if err != nil { + return fmt.Errorf("scenario %s: failed to delete pod: %w", scenario.Name, err) + } + + err = helpers.DeletePodNetworkInstance(kubeconfig, pnName, pniName) + if err != nil { + return fmt.Errorf("scenario %s: failed to delete PNI: %w", scenario.Name, err) + } + + err = helpers.DeletePodNetwork(kubeconfig, pnName) + if err != nil { + return fmt.Errorf("scenario %s: 
failed to delete PN: %w", scenario.Name, err) + } + + err = helpers.DeleteNamespace(kubeconfig, pnName) + if err != nil { + return fmt.Errorf("scenario %s: failed to delete namespace: %w", scenario.Name, err) + } + + fmt.Printf("Successfully deleted scenario: %s\n", scenario.Name) + return nil +} + +func CreateAllScenarios(testScenarios TestScenarios) error { + for _, scenario := range testScenarios.Scenarios { + fmt.Printf("\n=== Creating scenario: %s ===\n", scenario.Name) + err := CreateScenarioResources(scenario, testScenarios) + if err != nil { + return err + } + } + return nil +} + +func DeleteAllScenarios(testScenarios TestScenarios) error { + // Phase 1: Delete all pods first + fmt.Printf("\n=== Phase 1: Deleting all pods ===\n") + for _, scenario := range testScenarios.Scenarios { + kubeconfig := getKubeconfigPath(scenario.Cluster) + vnetShort := strings.TrimPrefix(scenario.VnetName, "cx_vnet_") + vnetShort = strings.ReplaceAll(vnetShort, "_", "-") + subnetNameSafe := strings.ReplaceAll(scenario.SubnetName, "_", "-") + pnName := fmt.Sprintf("pn-%s-%s-%s", testScenarios.BuildID, vnetShort, subnetNameSafe) + podName := "pod-" + scenario.PodNameSuffix + + fmt.Printf("Deleting pod for scenario: %s\n", scenario.Name) + err := helpers.DeletePod(kubeconfig, pnName, podName) + if err != nil { + fmt.Printf("Warning: Failed to delete pod for scenario %s: %v\n", scenario.Name, err) + } + } + + // Phase 2: Delete shared PNI/PN/Namespace resources (grouped by vnet/subnet/cluster) + fmt.Printf("\n=== Phase 2: Deleting shared PNI/PN/Namespace resources ===\n") + resourceGroups := make(map[string]bool) + + for _, scenario := range testScenarios.Scenarios { + kubeconfig := getKubeconfigPath(scenario.Cluster) + vnetShort := strings.TrimPrefix(scenario.VnetName, "cx_vnet_") + vnetShort = strings.ReplaceAll(vnetShort, "_", "-") + subnetNameSafe := strings.ReplaceAll(scenario.SubnetName, "_", "-") + pnName := fmt.Sprintf("pn-%s-%s-%s", testScenarios.BuildID, vnetShort, 
subnetNameSafe) + pniName := fmt.Sprintf("pni-%s-%s-%s", testScenarios.BuildID, vnetShort, subnetNameSafe) + + resourceKey := fmt.Sprintf("%s:%s", scenario.Cluster, pnName) + if resourceGroups[resourceKey] { + continue + } + resourceGroups[resourceKey] = true + + fmt.Printf("\nDeleting shared resources for %s/%s on %s\n", scenario.VnetName, scenario.SubnetName, scenario.Cluster) + + err := helpers.DeletePodNetworkInstance(kubeconfig, pnName, pniName) + if err != nil { + fmt.Printf("Warning: Failed to delete PNI %s: %v\n", pniName, err) + } + + err = helpers.DeletePodNetwork(kubeconfig, pnName) + if err != nil { + fmt.Printf("Warning: Failed to delete PN %s: %v\n", pnName, err) + } + + err = helpers.DeleteNamespace(kubeconfig, pnName) + if err != nil { + fmt.Printf("Warning: Failed to delete namespace %s: %v\n", pnName, err) + } + } + + // Phase 3: Verify no MTPNC resources are stuck + fmt.Printf("\n=== Phase 3: Verifying MTPNC cleanup ===\n") + clustersChecked := make(map[string]bool) + + for _, scenario := range testScenarios.Scenarios { + if clustersChecked[scenario.Cluster] { + continue + } + clustersChecked[scenario.Cluster] = true + + kubeconfig := getKubeconfigPath(scenario.Cluster) + fmt.Printf("Checking for pending MTPNC resources in cluster %s\n", scenario.Cluster) + + err := helpers.VerifyNoMTPNC(kubeconfig, testScenarios.BuildID) + if err != nil { + fmt.Printf("WARNING: Found pending MTPNC resources in cluster %s: %v\n", scenario.Cluster, err) + } else { + fmt.Printf("No pending MTPNC resources found in cluster %s\n", scenario.Cluster) + } + } + + fmt.Printf("\n=== All scenarios deleted ===\n") + return nil +} + +func DeleteTestResources(kubeconfig, pnName, pniName string) error { + for i := 0; i < 2; i++ { + podName := fmt.Sprintf("pod-c2-%d", i) + err := helpers.DeletePod(kubeconfig, pnName, podName) + if err != nil { + return fmt.Errorf("failed to delete pod %s: %w", podName, err) + } + } + + err := helpers.DeletePodNetworkInstance(kubeconfig, pnName, 
	pniName)
	if err != nil {
		return fmt.Errorf("failed to delete PodNetworkInstance: %w", err)
	}

	err = helpers.DeletePodNetwork(kubeconfig, pnName)
	if err != nil {
		return fmt.Errorf("failed to delete PodNetwork: %w", err)
	}

	// Namespace goes last so the PN/PNI objects inside it are removed first.
	err = helpers.DeleteNamespace(kubeconfig, pnName)
	if err != nil {
		return fmt.Errorf("failed to delete namespace: %w", err)
	}

	return nil
}

// ConnectivityTest defines a connectivity test between two pods.
// Two groups of fields coexist: the original pod-to-pod fields and a newer
// set (SourceCluster/SourcePodName/SourceNS/DestEndpoint/...) used by the
// private endpoint tests; callers populate whichever group applies.
type ConnectivityTest struct {
	Name            string // Descriptive name of the test case
	SourcePod       string // Name of the source pod
	SourceNamespace string // Namespace of the source pod
	DestinationPod  string // Name of the destination pod
	DestNamespace   string // Namespace of the destination pod
	Cluster         string // Cluster where source pod is running (for backward compatibility)
	DestCluster     string // Cluster where destination pod is running (if different from source)
	Description     string // Human-readable description of the test
	ShouldFail      bool // If true, connectivity is expected to fail (NSG block, customer isolation)

	// Fields for private endpoint tests
	SourceCluster string // Cluster where source pod is running
	SourcePodName string // Name of the source pod
	SourceNS      string // Namespace of the source pod
	DestEndpoint  string // Destination endpoint (IP or hostname)
	TestType      string // Type of test: "pod-to-pod" or "storage-access"
	Purpose       string // Description of the test purpose
}

// RunConnectivityTest tests TCP connectivity between two pods using netcat
func RunConnectivityTest(test ConnectivityTest) error {
	sourceKubeconfig := getKubeconfigPath(test.Cluster)

	// The destination pod may live on a different cluster.
	destKubeconfig := sourceKubeconfig
	if test.DestCluster != "" {
		destKubeconfig = getKubeconfigPath(test.DestCluster)
	}

	// Cross-VNet traffic uses the delegated (eth1) address, not the pod IP.
	destIP, err := helpers.GetPodDelegatedIP(destKubeconfig, test.DestNamespace, test.DestinationPod)
	if err != nil {
		return fmt.Errorf("failed to get destination pod delegated IP: %w", err)
	}

	fmt.Printf("Testing TCP connectivity from %s/%s (cluster: %s) to %s/%s (cluster: %s, eth1: %s) on port 8080\n",
		test.SourceNamespace,
test.SourcePod, test.Cluster, + test.DestNamespace, test.DestinationPod, test.DestCluster, destIP) + + // Use netcat to test TCP connectivity through the delegated subnet interface (eth1) + // -w 3: 3 second timeout for connection + // -z: Zero-I/O mode (scanning) - just check if port is open + // Route through eth1 by binding to its IP address + eth1IP, err := helpers.GetPodDelegatedIP(sourceKubeconfig, test.SourceNamespace, test.SourcePod) + if err != nil { + return fmt.Errorf("failed to get source pod eth1 IP: %w", err) + } + + // Test TCP connection: send test message and read response + ncCmd := fmt.Sprintf("echo 'test' | nc -w 3 -s %s %s 8080", eth1IP, destIP) + + output, err := helpers.ExecInPod(sourceKubeconfig, test.SourceNamespace, test.SourcePod, ncCmd) + if err != nil { + return fmt.Errorf("TCP connectivity test failed: %w\nOutput: %s", err, output) + } + + // Verify we got the expected response from the TCP server + if strings.Contains(output, "TCP Connection Success") { + fmt.Printf("TCP connectivity successful! Response: %s\n", truncateString(output, 100)) + return nil + } + + return fmt.Errorf("%w (expected 'TCP Connection Success')\nOutput: %s", ErrUnexpectedTCPResponse, truncateString(output, 100)) +} + +func truncateString(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + return s[:maxLen] + "..." 
+} + +func GenerateStorageSASToken(storageAccountName, containerName, blobName string) (string, error) { + expiryTime := time.Now().UTC().Add(7 * 24 * time.Hour).Format("2006-01-02") + + cmd := exec.Command("az", "storage", "blob", "generate-sas", + "--account-name", storageAccountName, + "--container-name", containerName, + "--name", blobName, + "--permissions", "r", + "--expiry", expiryTime, + "--output", "tsv") + + out, err := cmd.CombinedOutput() + sasToken := strings.TrimSpace(string(out)) + + accountKeyWorked := err == nil && !strings.Contains(sasToken, "WARNING") && + !strings.Contains(sasToken, "ERROR") && (strings.Contains(sasToken, "sv=") || strings.Contains(sasToken, "sig=")) + + if !accountKeyWorked { + if err != nil { + fmt.Printf("Account key SAS generation failed (error): %s\n", string(out)) + } else { + fmt.Printf("Account key SAS generation failed (no credentials): %s\n", sasToken) + } + + cmd = exec.Command("az", "storage", "blob", "generate-sas", + "--account-name", storageAccountName, + "--container-name", containerName, + "--name", blobName, + "--permissions", "r", + "--expiry", expiryTime, + "--auth-mode", "login", + "--as-user", + "--output", "tsv") + + out, err = cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("%w (both account key and user delegation): %w\n%s", ErrFailedToGenerateSASToken, err, string(out)) + } + + sasToken = strings.TrimSpace(string(out)) + } + + if sasToken == "" { + return "", ErrSASTokenEmpty + } + sasToken = strings.Trim(sasToken, "\"'") + if !strings.Contains(sasToken, "sv=") && !strings.Contains(sasToken, "sig=") { + return "", fmt.Errorf("%w (missing sv= or sig=): %s", ErrSASTokenInvalid, sasToken) + } + + return sasToken, nil +} + +func GetStoragePrivateEndpoint(storageAccountName string) (string, error) { + return storageAccountName + ".blob.core.windows.net", nil +} + +func RunPrivateEndpointTest(test ConnectivityTest) error { + kubeconfig := getKubeconfigPath(test.SourceCluster) + + 
fmt.Printf("Testing private endpoint access from %s to %s\n", + test.SourcePodName, test.DestEndpoint) + + // Step 1: Verify pod is running + fmt.Printf("==> Verifying pod %s is running\n", test.SourcePodName) + podStatusCmd := fmt.Sprintf("kubectl --kubeconfig %s get pod %s -n %s -o jsonpath='{.status.phase}'", kubeconfig, test.SourcePodName, test.SourceNS) + statusOut, err := exec.Command("sh", "-c", podStatusCmd).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to get pod status: %w\nOutput: %s", err, string(statusOut)) + } + podStatus := strings.TrimSpace(string(statusOut)) + if podStatus != "Running" { + return fmt.Errorf("%w: pod %s (status: %s)", ErrPodNotRunning, test.SourcePodName, podStatus) + } + fmt.Printf("Pod is running\n") + + // Step 2: Verify DNS resolution with longer timeout + fmt.Printf("==> Checking DNS resolution for %s\n", test.DestEndpoint) + resolveCmd := fmt.Sprintf("nslookup %s | tail -2", test.DestEndpoint) + resolveOutput, resolveErr := ExecInPodWithTimeout(kubeconfig, test.SourceNS, test.SourcePodName, resolveCmd, 20*time.Second) + if resolveErr != nil { + return fmt.Errorf("DNS resolution failed: %w\nOutput: %s", resolveErr, resolveOutput) + } + fmt.Printf("DNS Resolution Result:\n%s\n", resolveOutput) + + // Step 3: Generate SAS token for test blob + fmt.Printf("==> Generating SAS token for test blob\n") + // Extract storage account name from FQDN (e.g., sa106936191.blob.core.windows.net -> sa106936191) + storageAccountName := strings.Split(test.DestEndpoint, ".")[0] + sasToken, err := GenerateStorageSASToken(storageAccountName, "test", "hello.txt") + if err != nil { + return fmt.Errorf("failed to generate SAS token: %w", err) + } + + // Step 4: Download test blob using SAS token with verbose output + fmt.Printf("==> Downloading test blob via private endpoint\n") + blobURL := fmt.Sprintf("https://%s/test/hello.txt?%s", test.DestEndpoint, sasToken) + + // Use wget instead of curl - it handles special characters better + 
// -O- outputs to stdout, -q is quiet mode, --timeout sets timeout + wgetCmd := fmt.Sprintf("wget -O- --timeout=30 --tries=1 '%s' 2>&1", blobURL) + + output, err := ExecInPodWithTimeout(kubeconfig, test.SourceNS, test.SourcePodName, wgetCmd, 45*time.Second) + if err != nil { + if strings.Contains(output, "ERROR 403") || strings.Contains(output, "ERROR 401") { + return fmt.Errorf("%w\nOutput: %s", ErrHTTPAuthError, truncateString(output, 500)) + } + if strings.Contains(output, "ERROR 404") { + return fmt.Errorf("%w\nOutput: %s", ErrBlobNotFound, truncateString(output, 500)) + } + return fmt.Errorf("private endpoint connectivity test failed: %w\nOutput: %s", err, truncateString(output, 500)) + } + + if strings.Contains(output, "Hello") || strings.Contains(output, "200 OK") || strings.Contains(output, "saved") { + fmt.Printf("Private endpoint access successful!\n") + return nil + } + + return fmt.Errorf("%w\nOutput: %s", ErrUnexpectedBlobResponse, truncateString(output, 500)) +} + +func ExecInPodWithTimeout(kubeconfig, namespace, podName, command string, timeout time.Duration) (string, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + cmd := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeconfig, "exec", podName, + "-n", namespace, "--", "sh", "-c", command) + out, err := cmd.CombinedOutput() + if err != nil { + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + return string(out), fmt.Errorf("command timed out after %v in pod %s: %w", timeout, podName, ctx.Err()) + } + return string(out), fmt.Errorf("failed to exec in pod %s in namespace %s: %w", podName, namespace, err) + } + + return string(out), nil +} diff --git a/test/integration/swiftv2/longRunningCluster/datapath_connectivity_test.go b/test/integration/swiftv2/longRunningCluster/datapath_connectivity_test.go new file mode 100644 index 0000000000..76f86f0749 --- /dev/null +++ b/test/integration/swiftv2/longRunningCluster/datapath_connectivity_test.go @@ 
-0,0 +1,156 @@ +//go:build connectivity_test +// +build connectivity_test + +package longrunningcluster + +import ( + "fmt" + "os" + "strings" + "testing" + + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" +) + +func TestDatapathConnectivity(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "Datapath Connectivity Suite") +} + +var _ = ginkgo.Describe("Datapath Connectivity Tests", func() { + + ginkgo.It("tests TCP connectivity between pods", func() { + rg := os.Getenv("RG") + buildId := os.Getenv("BUILD_ID") + if rg == "" || buildId == "" { + ginkgo.Fail(fmt.Sprintf("Missing required environment variables: RG='%s', BUILD_ID='%s'", rg, buildId)) + } + // Helper function to generate namespace from vnet and subnet + getNamespace := func(vnetName, subnetName string) string { + vnetPrefix := strings.TrimPrefix(vnetName, "cx_vnet_") + return fmt.Sprintf("pn-%s-%s-%s", rg, vnetPrefix, subnetName) + } + + // Define connectivity test cases + connectivityTests := []ConnectivityTest{ + { + Name: "SameVNetSameSubnet", + SourcePod: "pod-c1-aks1-v1s2-low", + SourceNamespace: getNamespace("cx_vnet_v1", "s2"), + DestinationPod: "pod-c1-aks1-v1s2-high", + DestNamespace: getNamespace("cx_vnet_v1", "s2"), + Cluster: "aks-1", + Description: "Test connectivity between low-NIC and high-NIC pods in same VNet/Subnet (cx_vnet_v1/s2)", + ShouldFail: false, + }, + { + Name: "NSGBlocked_S1toS2", + SourcePod: "pod-c1-aks1-v1s1-low", + SourceNamespace: getNamespace("cx_vnet_v1", "s1"), + DestinationPod: "pod-c1-aks1-v1s2-high", + DestNamespace: getNamespace("cx_vnet_v1", "s2"), + Cluster: "aks-1", + Description: "Test NSG isolation: s1 -> s2 in cx_vnet_v1 (should be blocked by NSG rule)", + ShouldFail: true, + }, + { + Name: "NSGBlocked_S2toS1", + SourcePod: "pod-c1-aks1-v1s2-low", + SourceNamespace: getNamespace("cx_vnet_v1", "s2"), + DestinationPod: "pod-c1-aks1-v1s1-low", + DestNamespace: getNamespace("cx_vnet_v1", "s1"), + Cluster: "aks-1", + Description: 
"Test NSG isolation: s2 -> s1 in cx_vnet_v1 (should be blocked by NSG rule)", + ShouldFail: true, + }, + { + Name: "DifferentClusters_SameVNet", + SourcePod: "pod-c1-aks1-v2s1-high", + SourceNamespace: getNamespace("cx_vnet_v2", "s1"), + DestinationPod: "pod-c1-aks2-v2s1-low", + DestNamespace: getNamespace("cx_vnet_v2", "s1"), + Cluster: "aks-1", + DestCluster: "aks-2", + Description: "Test connectivity across different clusters, same customer VNet (cx_vnet_v2)", + ShouldFail: false, + }, + { + Name: "PeeredVNets", + SourcePod: "pod-c1-aks1-v1s2-low", + SourceNamespace: getNamespace("cx_vnet_v1", "s2"), + DestinationPod: "pod-c1-aks1-v2s1-high", + DestNamespace: getNamespace("cx_vnet_v2", "s1"), + Cluster: "aks-1", + Description: "Test connectivity between peered VNets (cx_vnet_v1/s2 <-> cx_vnet_v2/s1)", + ShouldFail: false, + }, + { + Name: "PeeredVNets_v2tov3", + SourcePod: "pod-c1-aks1-v2s1-high", + SourceNamespace: getNamespace("cx_vnet_v2", "s1"), + DestinationPod: "pod-c1-aks2-v3s1-high", + DestNamespace: getNamespace("cx_vnet_v3", "s1"), + Cluster: "aks-1", + DestCluster: "aks-2", + Description: "Test connectivity between peered VNets across clusters (cx_vnet_v2 <-> cx_vnet_v3)", + ShouldFail: false, + }, + { + Name: "DifferentCustomers_v1tov4", + SourcePod: "pod-c1-aks1-v1s2-low", + SourceNamespace: getNamespace("cx_vnet_v1", "s2"), + DestinationPod: "pod-c2-aks2-v4s1-low", + DestNamespace: getNamespace("cx_vnet_v4", "s1"), + Cluster: "aks-1", + DestCluster: "aks-2", + Description: "Test isolation: Customer 1 to Customer 2 should fail (cx_vnet_v1 -> cx_vnet_v4)", + ShouldFail: true, + }, + { + Name: "DifferentCustomers_v2tov4", + SourcePod: "pod-c1-aks1-v2s1-high", + SourceNamespace: getNamespace("cx_vnet_v2", "s1"), + DestinationPod: "pod-c2-aks2-v4s1-high", + DestNamespace: getNamespace("cx_vnet_v4", "s1"), + Cluster: "aks-1", + DestCluster: "aks-2", + Description: "Test isolation: Customer 1 to Customer 2 should fail (cx_vnet_v2 -> cx_vnet_v4)", + 
ShouldFail: true, + }, + } + + ginkgo.By(fmt.Sprintf("Running %d connectivity tests", len(connectivityTests))) + + successCount := 0 + failureCount := 0 + + for _, test := range connectivityTests { + ginkgo.By(fmt.Sprintf("Test: %s - %s", test.Name, test.Description)) + + err := RunConnectivityTest(test) + + if test.ShouldFail { + if err == nil { + fmt.Printf("Test %s: UNEXPECTED SUCCESS (expected to be blocked!)\n", test.Name) + failureCount++ + ginkgo.Fail(fmt.Sprintf("Test %s: Expected failure but succeeded (blocking not working!)", test.Name)) + } else { + fmt.Printf("Test %s: Correctly blocked (connection failed as expected)\n", test.Name) + successCount++ + } + } else { + if err != nil { + fmt.Printf("Test %s: FAILED - %v\n", test.Name, err) + failureCount++ + gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("Test %s failed: %v", test.Name, err)) + } else { + fmt.Printf("Test %s: Connectivity successful\n", test.Name) + successCount++ + } + } + } + + ginkgo.By(fmt.Sprintf("Connectivity test summary: %d succeeded, %d failures", successCount, failureCount)) + }) +}) diff --git a/test/integration/swiftv2/longRunningCluster/datapath_create_test.go b/test/integration/swiftv2/longRunningCluster/datapath_create_test.go new file mode 100644 index 0000000000..fb1d54ac9d --- /dev/null +++ b/test/integration/swiftv2/longRunningCluster/datapath_create_test.go @@ -0,0 +1,115 @@ +//go:build create_test +// +build create_test + +package longrunningcluster + +import ( + "fmt" + "os" + "testing" + + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" +) + +func TestDatapathCreate(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "Datapath Create Suite") +} + +var _ = ginkgo.Describe("Datapath Create Tests", func() { + + ginkgo.It("creates PodNetwork, PodNetworkInstance, and Pods", func() { + rg := os.Getenv("RG") + buildId := os.Getenv("BUILD_ID") + if rg == "" || buildId == "" { + ginkgo.Fail(fmt.Sprintf("Missing required environment variables: 
RG='%s', BUILD_ID='%s'", rg, buildId)) + } + // Define all test scenarios + scenarios := []PodScenario{ + // Customer 2 scenarios on aks-2 with cx_vnet_v4 + { + Name: "Customer2-AKS2-VnetV4-S1-LowNic", + Cluster: "aks-2", + VnetName: "cx_vnet_v4", + SubnetName: "s1", + NodeSelector: "low-nic", + PodNameSuffix: "c2-aks2-v4s1-low", + }, + { + Name: "Customer2-AKS2-VnetV4-S1-HighNic", + Cluster: "aks-2", + VnetName: "cx_vnet_v4", + SubnetName: "s1", + NodeSelector: "high-nic", + PodNameSuffix: "c2-aks2-v4s1-high", + }, + // Customer 1 scenarios + { + Name: "Customer1-AKS1-VnetV1-S1-LowNic", + Cluster: "aks-1", + VnetName: "cx_vnet_v1", + SubnetName: "s1", + NodeSelector: "low-nic", + PodNameSuffix: "c1-aks1-v1s1-low", + }, + { + Name: "Customer1-AKS1-VnetV1-S2-LowNic", + Cluster: "aks-1", + VnetName: "cx_vnet_v1", + SubnetName: "s2", + NodeSelector: "low-nic", + PodNameSuffix: "c1-aks1-v1s2-low", + }, + { + Name: "Customer1-AKS1-VnetV1-S2-HighNic", + Cluster: "aks-1", + VnetName: "cx_vnet_v1", + SubnetName: "s2", + NodeSelector: "high-nic", + PodNameSuffix: "c1-aks1-v1s2-high", + }, + { + Name: "Customer1-AKS1-VnetV2-S1-HighNic", + Cluster: "aks-1", + VnetName: "cx_vnet_v2", + SubnetName: "s1", + NodeSelector: "high-nic", + PodNameSuffix: "c1-aks1-v2s1-high", + }, + { + Name: "Customer1-AKS2-VnetV2-S1-LowNic", + Cluster: "aks-2", + VnetName: "cx_vnet_v2", + SubnetName: "s1", + NodeSelector: "low-nic", + PodNameSuffix: "c1-aks2-v2s1-low", + }, + { + Name: "Customer1-AKS2-VnetV3-S1-HighNic", + Cluster: "aks-2", + VnetName: "cx_vnet_v3", + SubnetName: "s1", + NodeSelector: "high-nic", + PodNameSuffix: "c1-aks2-v3s1-high", + }, + } + + // Initialize test scenarios with cache + testScenarios := TestScenarios{ + ResourceGroup: rg, + BuildID: buildId, + PodImage: "nicolaka/netshoot:latest", + Scenarios: scenarios, + VnetSubnetCache: make(map[string]VnetSubnetInfo), + UsedNodes: make(map[string]bool), + } + + // Create all scenario resources + ginkgo.By(fmt.Sprintf("Creating 
all test scenarios (%d scenarios)", len(scenarios))) + err := CreateAllScenarios(testScenarios) + gomega.Expect(err).To(gomega.BeNil(), "Failed to create test scenarios") + + ginkgo.By("Successfully created all test scenarios") + }) +}) diff --git a/test/integration/swiftv2/longRunningCluster/datapath_delete_test.go b/test/integration/swiftv2/longRunningCluster/datapath_delete_test.go new file mode 100644 index 0000000000..7fba50fc6f --- /dev/null +++ b/test/integration/swiftv2/longRunningCluster/datapath_delete_test.go @@ -0,0 +1,113 @@ +//go:build delete_test + +package longrunningcluster + +import ( + "fmt" + "os" + "testing" + + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" +) + +func TestDatapathDelete(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "Datapath Delete Suite") +} + +var _ = ginkgo.Describe("Datapath Delete Tests", func() { + ginkgo.It("deletes PodNetwork, PodNetworkInstance, and Pods", func() { + rg := os.Getenv("RG") + buildId := os.Getenv("BUILD_ID") + if rg == "" || buildId == "" { + ginkgo.Fail(fmt.Sprintf("Missing required environment variables: RG='%s', BUILD_ID='%s'", rg, buildId)) + } + // Define all test scenarios (same as create) + scenarios := []PodScenario{ + // Customer 2 scenarios on aks-2 with cx_vnet_v4 + { + Name: "Customer2-AKS2-VnetV4-S1-LowNic", + Cluster: "aks-2", + VnetName: "cx_vnet_v4", + SubnetName: "s1", + NodeSelector: "low-nic", + PodNameSuffix: "c2-aks2-v4s1-low", + }, + { + Name: "Customer2-AKS2-VnetV4-S1-HighNic", + Cluster: "aks-2", + VnetName: "cx_vnet_v4", + SubnetName: "s1", + NodeSelector: "high-nic", + PodNameSuffix: "c2-aks2-v4s1-high", + }, + // Customer 1 scenarios + { + Name: "Customer1-AKS1-VnetV1-S1-LowNic", + Cluster: "aks-1", + VnetName: "cx_vnet_v1", + SubnetName: "s1", + NodeSelector: "low-nic", + PodNameSuffix: "c1-aks1-v1s1-low", + }, + { + Name: "Customer1-AKS1-VnetV1-S2-LowNic", + Cluster: "aks-1", + VnetName: "cx_vnet_v1", + SubnetName: "s2", + NodeSelector: 
"low-nic", + PodNameSuffix: "c1-aks1-v1s2-low", + }, + { + Name: "Customer1-AKS1-VnetV1-S2-HighNic", + Cluster: "aks-1", + VnetName: "cx_vnet_v1", + SubnetName: "s2", + NodeSelector: "high-nic", + PodNameSuffix: "c1-aks1-v1s2-high", + }, + { + Name: "Customer1-AKS1-VnetV2-S1-HighNic", + Cluster: "aks-1", + VnetName: "cx_vnet_v2", + SubnetName: "s1", + NodeSelector: "high-nic", + PodNameSuffix: "c1-aks1-v2s1-high", + }, + { + Name: "Customer1-AKS2-VnetV2-S1-LowNic", + Cluster: "aks-2", + VnetName: "cx_vnet_v2", + SubnetName: "s1", + NodeSelector: "low-nic", + PodNameSuffix: "c1-aks2-v2s1-low", + }, + { + Name: "Customer1-AKS2-VnetV3-S1-HighNic", + Cluster: "aks-2", + VnetName: "cx_vnet_v3", + SubnetName: "s1", + NodeSelector: "high-nic", + PodNameSuffix: "c1-aks2-v3s1-high", + }, + } + + // Initialize test scenarios with cache + testScenarios := TestScenarios{ + ResourceGroup: rg, + BuildID: buildId, + PodImage: "nicolaka/netshoot:latest", + Scenarios: scenarios, + VnetSubnetCache: make(map[string]VnetSubnetInfo), + UsedNodes: make(map[string]bool), + } + + // Delete all scenario resources + ginkgo.By("Deleting all test scenarios") + err := DeleteAllScenarios(testScenarios) + gomega.Expect(err).To(gomega.BeNil(), "Failed to delete test scenarios") + + ginkgo.By("Successfully deleted all test scenarios") + }) +}) diff --git a/test/integration/swiftv2/longRunningCluster/datapath_private_endpoint_test.go b/test/integration/swiftv2/longRunningCluster/datapath_private_endpoint_test.go new file mode 100644 index 0000000000..c8d75f4846 --- /dev/null +++ b/test/integration/swiftv2/longRunningCluster/datapath_private_endpoint_test.go @@ -0,0 +1,141 @@ +//go:build private_endpoint_test +// +build private_endpoint_test + +package longrunningcluster + +import ( + "fmt" + "os" + "testing" + + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" +) + +func TestDatapathPrivateEndpoint(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "Datapath Private 
Endpoint Suite") +} + +var _ = ginkgo.Describe("Private Endpoint Tests", func() { + rg := os.Getenv("RG") + buildId := os.Getenv("BUILD_ID") + storageAccount1 := os.Getenv("STORAGE_ACCOUNT_1") + storageAccount2 := os.Getenv("STORAGE_ACCOUNT_2") + + ginkgo.It("tests private endpoint access and isolation", func() { + if rg == "" || buildId == "" { + ginkgo.Fail(fmt.Sprintf("Missing required environment variables: RG='%s', BUILD_ID='%s'", rg, buildId)) + } + + if storageAccount1 == "" || storageAccount2 == "" { + ginkgo.Fail(fmt.Sprintf("Missing storage account environment variables: STORAGE_ACCOUNT_1='%s', STORAGE_ACCOUNT_2='%s'", storageAccount1, storageAccount2)) + } + + testScenarios := TestScenarios{ + ResourceGroup: rg, + BuildID: buildId, + PodImage: "nicolaka/netshoot:latest", + VnetSubnetCache: make(map[string]VnetSubnetInfo), + UsedNodes: make(map[string]bool), + } + + storageAccountName := storageAccount1 + ginkgo.By(fmt.Sprintf("Getting private endpoint for storage account: %s", storageAccountName)) + + storageEndpoint, err := GetStoragePrivateEndpoint(storageAccountName) + gomega.Expect(err).To(gomega.BeNil(), "Failed to get storage account private endpoint") + gomega.Expect(storageEndpoint).NotTo(gomega.BeEmpty(), "Storage account private endpoint is empty") + ginkgo.By(fmt.Sprintf("Storage account private endpoint: %s", storageEndpoint)) + + privateEndpointTests := []ConnectivityTest{ + // Test 1: Private Endpoint Access (Tenant A) - Pod from VNet-V1 Subnet 1 + { + Name: "Private Endpoint Access: VNet-V1-S1 to Storage-A", + SourceCluster: "aks-1", + SourcePodName: "pod-c1-aks1-v1s1-low", + SourceNS: "pn-" + testScenarios.BuildID + "-v1-s1", + DestEndpoint: storageEndpoint, + ShouldFail: false, + TestType: "storage-access", + Purpose: "Verify Tenant A pod can access Storage-A via private endpoint", + }, + // Test 2: Private Endpoint Access (Tenant A) - Pod from VNet-V1 Subnet 2 + { + Name: "Private Endpoint Access: VNet-V1-S2 to Storage-A", + 
SourceCluster: "aks-1", + SourcePodName: "pod-c1-aks1-v1s2-low", + SourceNS: "pn-" + testScenarios.BuildID + "-v1-s2", + DestEndpoint: storageEndpoint, + ShouldFail: false, + TestType: "storage-access", + Purpose: "Verify Tenant A pod can access Storage-A via private endpoint", + }, + // Test 3: Private Endpoint Access (Tenant A) - Pod from VNet-V2 + { + Name: "Private Endpoint Access: VNet-V2-S1 to Storage-A", + SourceCluster: "aks-1", + SourcePodName: "pod-c1-aks1-v2s1-high", + SourceNS: "pn-" + testScenarios.BuildID + "-v2-s1", + DestEndpoint: storageEndpoint, + ShouldFail: false, + TestType: "storage-access", + Purpose: "Verify Tenant A pod from peered VNet can access Storage-A", + }, + // Test 4: Private Endpoint Access (Tenant A) - Pod from VNet-V3 (cross-cluster) + { + Name: "Private Endpoint Access: VNet-V3-S1 to Storage-A (cross-cluster)", + SourceCluster: "aks-2", + SourcePodName: "pod-c1-aks2-v3s1-high", + SourceNS: "pn-" + testScenarios.BuildID + "-v3-s1", + DestEndpoint: storageEndpoint, + ShouldFail: false, + TestType: "storage-access", + Purpose: "Verify Tenant A pod from different cluster can access Storage-A", + }, + } + + ginkgo.By(fmt.Sprintf("Running %d Private Endpoint connectivity tests", len(privateEndpointTests))) + + successCount := 0 + failureCount := 0 + + for _, test := range privateEndpointTests { + ginkgo.By(fmt.Sprintf("\n=== Test: %s ===", test.Name)) + ginkgo.By(fmt.Sprintf("Purpose: %s", test.Purpose)) + ginkgo.By(fmt.Sprintf("Expected: %s", func() string { + if test.ShouldFail { + return "BLOCKED" + } + return "SUCCESS" + }())) + + err := RunPrivateEndpointTest(test) + + if test.ShouldFail { + if err != nil { + ginkgo.By(fmt.Sprintf("Test correctly BLOCKED as expected: %s", test.Name)) + successCount++ + } else { + ginkgo.By(fmt.Sprintf("Test FAILED: Expected connection to be blocked but it succeeded: %s", test.Name)) + failureCount++ + } + } else { + if err != nil { + ginkgo.By(fmt.Sprintf("Test FAILED: %s - Error: %v", 
test.Name, err)) + failureCount++ + } else { + ginkgo.By(fmt.Sprintf("Test PASSED: %s", test.Name)) + successCount++ + } + } + } + + ginkgo.By(fmt.Sprintf("\n=== Private Endpoint Test Summary ===")) + ginkgo.By(fmt.Sprintf("Total tests: %d", len(privateEndpointTests))) + ginkgo.By(fmt.Sprintf("Successful connections: %d", successCount)) + ginkgo.By(fmt.Sprintf("Unexpected failures: %d", failureCount)) + + gomega.Expect(failureCount).To(gomega.Equal(0), "Some private endpoint tests failed unexpectedly") + }) +}) diff --git a/test/integration/swiftv2/longRunningCluster/datapath_scale_test.go b/test/integration/swiftv2/longRunningCluster/datapath_scale_test.go new file mode 100644 index 0000000000..dcb71af15b --- /dev/null +++ b/test/integration/swiftv2/longRunningCluster/datapath_scale_test.go @@ -0,0 +1,194 @@ +//go:build scale_test +// +build scale_test + +package longrunningcluster + +import ( + "fmt" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/Azure/azure-container-networking/test/integration/swiftv2/helpers" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" +) + +func TestDatapathScale(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "Datapath Scale Suite") +} + +var _ = ginkgo.Describe("Datapath Scale Tests", func() { + rg := os.Getenv("RG") + buildId := os.Getenv("BUILD_ID") + + if rg == "" || buildId == "" { + ginkgo.Fail(fmt.Sprintf("Missing required environment variables: RG='%s', BUILD_ID='%s'", rg, buildId)) + } + + ginkgo.It("creates and deletes 15 pods in a burst using device plugin", func() { + // NOTE: Maximum pods per PodNetwork/PodNetworkInstance is limited by: + // 1. Subnet IP address capacity + // 2. Node capacity (typically 250 pods per node) + // 3. 
Available NICs on nodes (device plugin resources) + // For this test: Creating 15 pods across aks-1 and aks-2 + // Device plugin and Kubernetes scheduler automatically place pods on nodes with available NICs + + // Define scenarios for both clusters - 8 pods on aks-1, 7 pods on aks-2 (15 total for testing) + // IMPORTANT: Reuse existing PodNetworks from connectivity tests to avoid "duplicate podnetwork with same network id" error + scenarios := []struct { + cluster string + vnetName string + subnet string + podCount int + }{ + {cluster: "aks-1", vnetName: "cx_vnet_v1", subnet: "s1", podCount: 8}, + {cluster: "aks-2", vnetName: "cx_vnet_v3", subnet: "s1", podCount: 7}, + } // Initialize test scenarios with cache + testScenarios := TestScenarios{ + ResourceGroup: rg, + BuildID: buildId, + VnetSubnetCache: make(map[string]VnetSubnetInfo), + UsedNodes: make(map[string]bool), + PodImage: "nicolaka/netshoot:latest", + } + + startTime := time.Now() + var allResources []TestResources + for _, scenario := range scenarios { + kubeconfig := getKubeconfigPath(scenario.cluster) + + ginkgo.By(fmt.Sprintf("Getting network info for %s/%s in cluster %s", scenario.vnetName, scenario.subnet, scenario.cluster)) + netInfo, err := GetOrFetchVnetSubnetInfo(testScenarios.ResourceGroup, scenario.vnetName, scenario.subnet, testScenarios.VnetSubnetCache) + gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("Failed to get network info for %s/%s", scenario.vnetName, scenario.subnet)) + + vnetShort := strings.TrimPrefix(scenario.vnetName, "cx_vnet_") + vnetShort = strings.ReplaceAll(vnetShort, "_", "-") + subnetNameSafe := strings.ReplaceAll(scenario.subnet, "_", "-") + pnName := fmt.Sprintf("pn-%s-%s-%s", testScenarios.BuildID, vnetShort, subnetNameSafe) // Reuse connectivity test PN + pniName := fmt.Sprintf("pni-scale-%s-%s-%s", testScenarios.BuildID, vnetShort, subnetNameSafe) // New PNI for scale test + + resources := TestResources{ + Kubeconfig: kubeconfig, + PNName: pnName, // References 
the shared PodNetwork (also the namespace) + PNIName: pniName, // New PNI for scale test + Namespace: pnName, // Same as PN namespace + VnetGUID: netInfo.VnetGUID, + SubnetGUID: netInfo.SubnetGUID, + SubnetARMID: netInfo.SubnetARMID, + SubnetToken: netInfo.SubnetToken, + PodNetworkTemplate: "../../manifests/swiftv2/long-running-cluster/podnetwork.yaml", + PNITemplate: "../../manifests/swiftv2/long-running-cluster/podnetworkinstance.yaml", + PodTemplate: "../../manifests/swiftv2/long-running-cluster/pod-with-device-plugin.yaml", + PodImage: testScenarios.PodImage, + Reservations: 20, // Reserve 20 IPs for scale test pods + } + + ginkgo.By(fmt.Sprintf("Reusing existing PodNetwork: %s in cluster %s", pnName, scenario.cluster)) + ginkgo.By(fmt.Sprintf("Creating PodNetworkInstance: %s (references PN: %s) in namespace %s in cluster %s", pniName, pnName, pnName, scenario.cluster)) + err = CreatePodNetworkInstanceResource(resources) + gomega.Expect(err).To(gomega.BeNil(), "Failed to create PodNetworkInstance") + + allResources = append(allResources, resources) + } + + //Create pods in burst across both clusters - let scheduler place them automatically + totalPods := 0 + for _, s := range scenarios { + totalPods += s.podCount + } + ginkgo.By(fmt.Sprintf("Creating %d pods in burst (auto-scheduled by device plugin)", totalPods)) + + var wg sync.WaitGroup + errors := make(chan error, totalPods) + podIndex := 0 + + for i, scenario := range scenarios { + for j := 0; j < scenario.podCount; j++ { + wg.Add(1) + go func(resources TestResources, cluster string, idx int) { + defer wg.Done() + defer ginkgo.GinkgoRecover() + + podName := fmt.Sprintf("scale-pod-%d", idx) + ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s in cluster %s (auto-scheduled)", podName, resources.PNName, cluster)) + + // Create pod without specifying node - let device plugin and scheduler decide + err := CreatePod(resources.Kubeconfig, PodData{ + PodName: podName, + NodeName: "", + OS: "linux", + PNName: 
resources.PNName, + PNIName: resources.PNIName, + Namespace: resources.PNName, + Image: resources.PodImage, + }, resources.PodTemplate) + if err != nil { + errors <- fmt.Errorf("failed to create pod %s in cluster %s: %w", podName, cluster, err) + return + } + + err = helpers.WaitForPodScheduled(resources.Kubeconfig, resources.PNName, podName, 10, 6) + if err != nil { + errors <- fmt.Errorf("pod %s in cluster %s was not scheduled: %w", podName, cluster, err) + } + }(allResources[i], scenario.cluster, podIndex) + podIndex++ + } + } + + wg.Wait() + close(errors) + + elapsedTime := time.Since(startTime) + var errList []error + for err := range errors { + errList = append(errList, err) + } + gomega.Expect(errList).To(gomega.BeEmpty(), "Some pods failed to create") + ginkgo.By(fmt.Sprintf("Successfully created %d pods in %s", totalPods, elapsedTime)) + ginkgo.By("Waiting 30 seconds for pods to stabilize") + time.Sleep(30 * time.Second) + + ginkgo.By("Verifying all pods are in Running state") + podIndex = 0 + for i, scenario := range scenarios { + for j := 0; j < scenario.podCount; j++ { + podName := fmt.Sprintf("scale-pod-%d", podIndex) + err := helpers.WaitForPodRunning(allResources[i].Kubeconfig, allResources[i].PNName, podName, 5, 10) + gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("Pod %s did not reach running state in cluster %s", podName, scenario.cluster)) + podIndex++ + } + } + + ginkgo.By(fmt.Sprintf("All %d pods are running successfully across both clusters", totalPods)) + ginkgo.By("Cleaning up scale test resources") + podIndex = 0 + for i, scenario := range scenarios { + resources := allResources[i] + kubeconfig := resources.Kubeconfig + + for j := 0; j < scenario.podCount; j++ { + podName := fmt.Sprintf("scale-pod-%d", podIndex) + ginkgo.By(fmt.Sprintf("Deleting pod: %s from namespace %s in cluster %s", podName, resources.PNName, scenario.cluster)) + err := helpers.DeletePod(kubeconfig, resources.PNName, podName) + if err != nil { + fmt.Printf("Warning: 
Failed to delete pod %s: %v\n", podName, err) + } + podIndex++ + } + + ginkgo.By(fmt.Sprintf("Deleting PodNetworkInstance: %s from namespace %s in cluster %s", resources.PNIName, resources.PNName, scenario.cluster)) + err := helpers.DeletePodNetworkInstance(kubeconfig, resources.PNName, resources.PNIName) + if err != nil { + fmt.Printf("Warning: Failed to delete PNI %s: %v\n", resources.PNIName, err) + } + ginkgo.By(fmt.Sprintf("Keeping PodNetwork and namespace: %s (shared with connectivity tests) in cluster %s", resources.PNName, scenario.cluster)) + } + + ginkgo.By("Scale test cleanup completed") + }) +}) From c0b0fdbcde1f49f845ea9eeb6cf2a5c9e9763574 Mon Sep 17 00:00:00 2001 From: Alexander <39818795+QxBytes@users.noreply.github.com> Date: Thu, 18 Dec 2025 16:59:36 -0800 Subject: [PATCH 36/47] ci: fix ebpf manifests and allow e2e usage with cilium 1.18 and up (#4177) * remove bash references from ebpf 1.18 cilium distroless manifests also allows overriding cilium version in ebpf deploy script from acn pipeline if the cilium version is < 1.17, we use the makefile defaults as ebpf is not supported prior to 1.17 * skip ebpf host routing test if < 1.17 cilium * remove whitespace (noop) --- .../cilium-ebpf/cilium-e2e-job-template.yaml | 3 ++- .../cilium-ebpf/cilium-e2e-step-template.yaml | 10 ++++++++++ .../cilium-overlay-e2e-job-template.yaml | 2 ++ .../manifests/cilium/v1.18/ebpf/overlay/cilium.yaml | 10 +++++++--- .../manifests/cilium/v1.18/ebpf/podsubnet/cilium.yaml | 10 +++++++--- 5 files changed, 28 insertions(+), 7 deletions(-) diff --git a/.pipelines/singletenancy/cilium-ebpf/cilium-e2e-job-template.yaml b/.pipelines/singletenancy/cilium-ebpf/cilium-e2e-job-template.yaml index 6b9a200d17..555fddeacc 100644 --- a/.pipelines/singletenancy/cilium-ebpf/cilium-e2e-job-template.yaml +++ b/.pipelines/singletenancy/cilium-ebpf/cilium-e2e-job-template.yaml @@ -10,6 +10,7 @@ parameters: stages: - stage: ${{ parameters.clusterName }} + condition: and(succeeded(), 
not(or(startsWith(variables['CILIUM_VERSION_TAG'], 'v1.13'), startsWith(variables['CILIUM_VERSION_TAG'], 'v1.14'), startsWith(variables['CILIUM_VERSION_TAG'], 'v1.15'), startsWith(variables['CILIUM_VERSION_TAG'], 'v1.16')))) displayName: Create Cluster - ${{ parameters.displayName }} dependsOn: - ${{ parameters.dependsOn }} @@ -32,6 +33,7 @@ stages: region: $(REGION_AKS_CLUSTER_TEST) - stage: ${{ parameters.name }} + condition: and(succeeded(), not(or(startsWith(variables['CILIUM_VERSION_TAG'], 'v1.13'), startsWith(variables['CILIUM_VERSION_TAG'], 'v1.14'), startsWith(variables['CILIUM_VERSION_TAG'], 'v1.15'), startsWith(variables['CILIUM_VERSION_TAG'], 'v1.16')))) displayName: E2E - ${{ parameters.displayName }} dependsOn: - setup @@ -44,7 +46,6 @@ stages: GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path GOBIN: "$(GOPATH)/bin" # Go binaries path modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" - condition: and(succeeded(), eq(variables.TAG, variables.CURRENT_VERSION)) pool: name: $(BUILD_POOL_NAME_DEFAULT) jobs: diff --git a/.pipelines/singletenancy/cilium-ebpf/cilium-e2e-step-template.yaml b/.pipelines/singletenancy/cilium-ebpf/cilium-e2e-step-template.yaml index ff825116f9..816b254b7e 100644 --- a/.pipelines/singletenancy/cilium-ebpf/cilium-e2e-step-template.yaml +++ b/.pipelines/singletenancy/cilium-ebpf/cilium-e2e-step-template.yaml @@ -21,6 +21,16 @@ steps: kubectl get pods -Aowide # see makefile + echo "install Cilium ${CILIUM_VERSION_TAG}" + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2) + + # ebpf host routing is only supported on 1.17+ + if [[ "${DIR}" == "1.13" || "${DIR}" == "1.14" || "${DIR}" == "1.15" || "${DIR}" == "1.16" ]]; then + echo "WARNING: Incompatible cilium version ${CILIUM_VERSION_TAG} for ebpf host routing" + fi + echo "selected cilium directory ${DIR}" + export EBPF_CILIUM_DIR=${DIR} + export EBPF_CILIUM_VERSION_TAG=${CILIUM_VERSION_TAG} export AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY=acnpublic.azurecr.io export AZURE_IPTABLES_MONITOR_TAG=$(make azure-iptables-monitor-version) make -C ./hack/aks deploy-ebpf-podsubnet-cilium diff --git a/.pipelines/singletenancy/cilium-overlay-ebpf/cilium-overlay-e2e-job-template.yaml b/.pipelines/singletenancy/cilium-overlay-ebpf/cilium-overlay-e2e-job-template.yaml index cf15021dc8..e41955a309 100644 --- a/.pipelines/singletenancy/cilium-overlay-ebpf/cilium-overlay-e2e-job-template.yaml +++ b/.pipelines/singletenancy/cilium-overlay-ebpf/cilium-overlay-e2e-job-template.yaml @@ -10,6 +10,7 @@ parameters: stages: - stage: ${{ parameters.clusterName }} + condition: and(succeeded(), not(or(startsWith(variables['CILIUM_VERSION_TAG'], 'v1.13'), startsWith(variables['CILIUM_VERSION_TAG'], 'v1.14'), startsWith(variables['CILIUM_VERSION_TAG'], 'v1.15'), startsWith(variables['CILIUM_VERSION_TAG'], 'v1.16')))) displayName: Create Cluster - ${{ parameters.displayName }} dependsOn: - ${{ parameters.dependsOn }} @@ -32,6 +33,7 @@ stages: region: $(REGION_AKS_CLUSTER_TEST) - stage: ${{ parameters.name }} + condition: and(succeeded(), not(or(startsWith(variables['CILIUM_VERSION_TAG'], 'v1.13'), startsWith(variables['CILIUM_VERSION_TAG'], 'v1.14'), startsWith(variables['CILIUM_VERSION_TAG'], 'v1.15'), startsWith(variables['CILIUM_VERSION_TAG'], 'v1.16')))) displayName: E2E - ${{ parameters.displayName }} dependsOn: - setup diff --git a/test/integration/manifests/cilium/v1.18/ebpf/overlay/cilium.yaml b/test/integration/manifests/cilium/v1.18/ebpf/overlay/cilium.yaml index fde0f2bdf6..b2ac592f6f 100644 --- 
a/test/integration/manifests/cilium/v1.18/ebpf/overlay/cilium.yaml +++ b/test/integration/manifests/cilium/v1.18/ebpf/overlay/cilium.yaml @@ -262,7 +262,9 @@ spec: - mountPath: /etc/config name: iptables-config - command: - - /install-plugin.sh + - sh + args: + - "/install-plugin.sh" image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG imagePullPolicy: IfNotPresent name: install-cni-binaries @@ -352,7 +354,7 @@ spec: - args: - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf command: - - /bin/bash + - sh - -c - -- image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG @@ -368,7 +370,9 @@ spec: mountPropagation: Bidirectional name: bpf-maps - command: - - /init-container.sh + - sh + args: + - "/init-container.sh" env: - name: CILIUM_ALL_STATE valueFrom: diff --git a/test/integration/manifests/cilium/v1.18/ebpf/podsubnet/cilium.yaml b/test/integration/manifests/cilium/v1.18/ebpf/podsubnet/cilium.yaml index 0760191fc2..9ac19c59bd 100644 --- a/test/integration/manifests/cilium/v1.18/ebpf/podsubnet/cilium.yaml +++ b/test/integration/manifests/cilium/v1.18/ebpf/podsubnet/cilium.yaml @@ -247,7 +247,9 @@ spec: - mountPath: /etc/config name: iptables-config - command: - - /install-plugin.sh + - sh + args: + - "/install-plugin.sh" image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG imagePullPolicy: IfNotPresent name: install-cni-binaries @@ -337,7 +339,7 @@ spec: - args: - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf command: - - /bin/bash + - sh - -c - -- image: $CILIUM_IMAGE_REGISTRY/cilium/cilium-distroless-init:$CILIUM_VERSION_TAG @@ -353,7 +355,9 @@ spec: mountPropagation: Bidirectional name: bpf-maps - command: - - /init-container.sh + - sh + args: + - "/init-container.sh" env: - name: CILIUM_ALL_STATE valueFrom: From c04353ba4226881ee37714cc49f574fabfee2631 Mon Sep 17 00:00:00 2001 From: Alexander <39818795+QxBytes@users.noreply.github.com> Date: 
Thu, 18 Dec 2025 17:31:16 -0800 Subject: [PATCH 37/47] feat: select linux network mode based on conflist instead of statefile (#4174) * select linux network mode based on runtime nwcfg instead of from statefile * add unit test * use nwCfg.Mode as it has same val as epInfo.Mode --- cni/network/network.go | 4 +- cni/network/network_linux_test.go | 2 +- cni/network/network_windows_test.go | 2 +- network/endpoint.go | 4 +- network/endpoint_linux.go | 18 ++++----- network/endpoint_linux_test.go | 60 +++++++++++++++++++++++++++++ network/endpoint_test.go | 8 +++- network/endpoint_windows.go | 4 +- network/endpoint_windows_test.go | 10 ++--- network/manager.go | 14 +++---- network/manager_mock.go | 2 +- 11 files changed, 96 insertions(+), 32 deletions(-) diff --git a/cni/network/network.go b/cni/network/network.go index a3fa3784d9..5eb4552b91 100644 --- a/cni/network/network.go +++ b/cni/network/network.go @@ -609,7 +609,7 @@ func (plugin *NetPlugin) Add(args *cniSkel.CmdArgs) error { // Delete all endpoints for _, epInfo := range epInfos { - deleteErr := plugin.nm.DeleteEndpoint(epInfo.NetworkID, epInfo.EndpointID, epInfo) + deleteErr := plugin.nm.DeleteEndpoint(epInfo.NetworkID, epInfo.EndpointID, epInfo, nwCfg.Mode) if deleteErr != nil { // we already do not return an error when the endpoint is not found, so deleteErr is a real error logger.Error("Could not delete endpoint after detecting add failure", zap.String("epInfo", epInfo.PrettyString()), zap.Error(deleteErr)) @@ -1124,7 +1124,7 @@ func (plugin *NetPlugin) Delete(args *cniSkel.CmdArgs) error { // delete endpoints for _, epInfo := range epInfos { // in stateless, network id is not populated in epInfo, but in stateful cni, it is (nw id is used in stateful) - if err = plugin.nm.DeleteEndpoint(epInfo.NetworkID, epInfo.EndpointID, epInfo); err != nil { + if err = plugin.nm.DeleteEndpoint(epInfo.NetworkID, epInfo.EndpointID, epInfo, nwCfg.Mode); err != nil { // An error will not be returned if the endpoint is not 
found // return a retriable error so the container runtime will retry this DEL later // the implementation of this function returns nil if the endpoint doens't exist, so diff --git a/cni/network/network_linux_test.go b/cni/network/network_linux_test.go index 97304569b3..38c1f000f1 100644 --- a/cni/network/network_linux_test.go +++ b/cni/network/network_linux_test.go @@ -523,7 +523,7 @@ func TestPluginLinuxAdd(t *testing.T) { if epID == "none" { t.Fail() } - err = tt.plugin.nm.DeleteEndpoint("", epID, nil) + err = tt.plugin.nm.DeleteEndpoint("", epID, nil, "") require.NoError(t, err) } diff --git a/cni/network/network_windows_test.go b/cni/network/network_windows_test.go index bcf3199e31..5d1189a350 100644 --- a/cni/network/network_windows_test.go +++ b/cni/network/network_windows_test.go @@ -1339,7 +1339,7 @@ func TestPluginWindowsAdd(t *testing.T) { if epID == "none" { t.Fail() } - err = tt.plugin.nm.DeleteEndpoint("", epID, nil) + err = tt.plugin.nm.DeleteEndpoint("", epID, nil, "") require.NoError(t, err) } diff --git a/network/endpoint.go b/network/endpoint.go index 08f2e81f6c..706069d7a1 100644 --- a/network/endpoint.go +++ b/network/endpoint.go @@ -221,7 +221,7 @@ func (nw *network) newEndpoint( // DeleteEndpoint deletes an existing endpoint from the network. func (nw *network) deleteEndpoint(nl netlink.NetlinkInterface, plc platform.ExecClient, nioc netio.NetIOInterface, nsc NamespaceClientInterface, - iptc ipTablesClient, dhcpc dhcpClient, endpointID string, + iptc ipTablesClient, dhcpc dhcpClient, endpointID string, mode string, ) error { var err error @@ -241,7 +241,7 @@ func (nw *network) deleteEndpoint(nl netlink.NetlinkInterface, plc platform.Exec // Call the platform implementation. 
// Pass nil for epClient and will be initialized in deleteEndpointImpl - err = nw.deleteEndpointImpl(nl, plc, nil, nioc, nsc, iptc, dhcpc, ep) + err = nw.deleteEndpointImpl(nl, plc, nil, nioc, nsc, iptc, dhcpc, ep, mode) if err != nil { return err } diff --git a/network/endpoint_linux.go b/network/endpoint_linux.go index 5f57a66d51..d86b151a86 100644 --- a/network/endpoint_linux.go +++ b/network/endpoint_linux.go @@ -139,7 +139,7 @@ func (nw *network) newEndpointImpl( if epClient == nil { //nolint:gocritic if vlanid != 0 { - if nw.Mode == opModeTransparentVlan { + if epInfo.Mode == opModeTransparentVlan { logger.Info("Transparent vlan client") if _, ok := epInfo.Data[SnatBridgeIPKey]; ok { nw.SnatBridgeIP = epInfo.Data[SnatBridgeIPKey].(string) @@ -163,15 +163,15 @@ func (nw *network) newEndpointImpl( plc, iptc) } - } else if nw.Mode != opModeTransparent { + } else if epInfo.Mode != opModeTransparent { logger.Info("Bridge client") - epClient = NewLinuxBridgeEndpointClient(nw.extIf, hostIfName, contIfName, nw.Mode, nl, plc) + epClient = NewLinuxBridgeEndpointClient(nw.extIf, hostIfName, contIfName, epInfo.Mode, nl, plc) } else if epInfo.NICType == cns.NodeNetworkInterfaceFrontendNIC { logger.Info("Secondary client") epClient = NewSecondaryEndpointClient(nl, netioCli, plc, nsc, dhcpclient, ep) } else { logger.Info("Transparent client") - epClient = NewTransparentEndpointClient(nw.extIf, hostIfName, contIfName, nw.Mode, nl, netioCli, plc) + epClient = NewTransparentEndpointClient(nw.extIf, hostIfName, contIfName, epInfo.Mode, nl, netioCli, plc) } } @@ -266,7 +266,7 @@ func (nw *network) newEndpointImpl( // deleteEndpointImpl deletes an existing endpoint from the network. 
func (nw *network) deleteEndpointImpl(nl netlink.NetlinkInterface, plc platform.ExecClient, epClient EndpointClient, nioc netio.NetIOInterface, nsc NamespaceClientInterface, - iptc ipTablesClient, dhcpc dhcpClient, ep *endpoint, + iptc ipTablesClient, dhcpc dhcpClient, ep *endpoint, mode string, ) error { // Delete the veth pair by deleting one of the peer interfaces. // Deleting the host interface is more convenient since it does not require @@ -277,13 +277,13 @@ func (nw *network) deleteEndpointImpl(nl netlink.NetlinkInterface, plc platform. //nolint:gocritic if ep.VlanID != 0 { epInfo := ep.getInfo() - if nw.Mode == opModeTransparentVlan { + if mode == opModeTransparentVlan { epClient = NewTransparentVlanEndpointClient(nw, epInfo, ep.HostIfName, "", ep.VlanID, ep.LocalIP, nl, plc, nsc, iptc) } else { epClient = NewOVSEndpointClient(nw, epInfo, ep.HostIfName, "", ep.VlanID, ep.LocalIP, nl, ovsctl.NewOvsctl(), plc, iptc) } - } else if nw.Mode != opModeTransparent { - epClient = NewLinuxBridgeEndpointClient(nw.extIf, ep.HostIfName, "", nw.Mode, nl, plc) + } else if mode != opModeTransparent { + epClient = NewLinuxBridgeEndpointClient(nw.extIf, ep.HostIfName, "", mode, nl, plc) } else { // delete if secondary interfaces populated or endpoint of type delegated (new way) if len(ep.SecondaryInterfaces) > 0 || ep.NICType == cns.NodeNetworkInterfaceFrontendNIC { @@ -297,7 +297,7 @@ func (nw *network) deleteEndpointImpl(nl netlink.NetlinkInterface, plc platform. 
} } - epClient = NewTransparentEndpointClient(nw.extIf, ep.HostIfName, "", nw.Mode, nl, nioc, plc) + epClient = NewTransparentEndpointClient(nw.extIf, ep.HostIfName, "", mode, nl, nioc, plc) } } diff --git a/network/endpoint_linux_test.go b/network/endpoint_linux_test.go index 58f3ae43dd..721e629174 100644 --- a/network/endpoint_linux_test.go +++ b/network/endpoint_linux_test.go @@ -4,8 +4,11 @@ import ( "net" "testing" + "github.com/Azure/azure-container-networking/cns" + "github.com/Azure/azure-container-networking/iptables" "github.com/Azure/azure-container-networking/netio" "github.com/Azure/azure-container-networking/netlink" + "github.com/Azure/azure-container-networking/platform" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/pkg/errors" @@ -134,4 +137,61 @@ var _ = Describe("Test TestEndpointLinux", func() { Expect(err).ToNot(BeNil()) }) }) + + Describe("Test Add and Delete Endpoint Linux", func() { + nw2 := &network{ + Endpoints: map[string]*endpoint{}, + Mode: opModeBridge, + extIf: &externalInterface{IPv4Gateway: net.ParseIP("192.168.0.1")}, + } + _, dummyIPNet, _ := net.ParseCIDR("10.0.0.0/24") + epInfo2 := &EndpointInfo{ + EndpointID: "768e8deb-eth1", + Data: make(map[string]interface{}), + IfName: eth0IfName, + NICType: cns.InfraNIC, + ContainerID: "0ea7476f26d192f067abdc8b3df43ce3cdbe324386e1c010cb48de87eefef480", + Mode: opModeTransparent, + IPAddresses: []net.IPNet{*dummyIPNet}, + } + It("Should ignore the network struct network mode and use the epInfo network mode during add", func() { + // check that we select the transparent endpoint based on epINfo, in which case the below command is run + transparentRun := false + checkTransparentRun := func(cmd string) (string, error) { + if cmd == "echo 1 > /proc/sys/net/ipv4/conf/azv768e8de/proxy_arp" { + transparentRun = true + } + return "", nil + } + pl := platform.NewMockExecClient(false) + pl.SetExecRawCommand(checkTransparentRun) + + ep, err := nw2.newEndpointImpl(nil, 
netlink.NewMockNetlink(false, ""), pl, + netio.NewMockNetIO(false, 0), nil, NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, epInfo2) + Expect(err).NotTo(HaveOccurred()) + Expect(ep).NotTo(BeNil()) + Expect(ep.Id).To(Equal(epInfo2.EndpointID)) + Expect(transparentRun).To(BeTrue()) + }) + It("Should use the passed in mode during delete", func() { + // check that we select the transparent endpoint based on epInfo, in which case we remove routes + transparentRun := false + checkTransparentRun := func(_ *netlink.Route) error { + transparentRun = true + return nil + } + nl := netlink.NewMockNetlink(false, "") + nl.SetDeleteRouteValidationFn(checkTransparentRun) + + ep2, err := nw2.newEndpointImpl(nil, netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), + netio.NewMockNetIO(false, 0), nil, NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, epInfo2) + Expect(err).ToNot(HaveOccurred()) + Expect(ep2).ToNot(BeNil()) + // Deleting the endpoint + //nolint:errcheck // ignore error + nw2.deleteEndpointImpl(nl, platform.NewMockExecClient(false), nil, netio.NewMockNetIO(false, 0), + NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, ep2, opModeTransparent) + Expect(transparentRun).To(BeTrue()) + }) + }) }) diff --git a/network/endpoint_test.go b/network/endpoint_test.go index bc31c3b3ac..3361989e77 100644 --- a/network/endpoint_test.go +++ b/network/endpoint_test.go @@ -258,11 +258,13 @@ var _ = Describe("Test Endpoint", func() { Expect(len(mockCli.endpoints)).To(Equal(1)) // Deleting the endpoint //nolint:errcheck // ignore error - nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, ep2) + nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, netio.NewMockNetIO(false, 0), + NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, ep2, "") 
Expect(len(mockCli.endpoints)).To(Equal(0)) // Deleting same endpoint with same id should not fail //nolint:errcheck // ignore error - nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, ep2) + nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, netio.NewMockNetIO(false, 0), + NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, ep2, "") Expect(len(mockCli.endpoints)).To(Equal(0)) }) }) @@ -332,12 +334,14 @@ var _ = Describe("Test Endpoint", func() { EndpointID: "768e8deb-eth1", IfName: eth0IfName, NICType: cns.InfraNIC, + Mode: opModeTransparent, } secondaryEpInfo := &EndpointInfo{ // When we create the secondary endpoint infos while looping over the interface infos, we pass in the same endpoint id EndpointID: "768e8deb-eth1", NICType: cns.NodeNetworkInterfaceFrontendNIC, Routes: []RouteInfo{{Dst: *ipnet}}, + Mode: opModeTransparent, } It("Should not add endpoint to the network when there is an error", func() { diff --git a/network/endpoint_windows.go b/network/endpoint_windows.go index 7b842dfe32..a2b7d03b3d 100644 --- a/network/endpoint_windows.go +++ b/network/endpoint_windows.go @@ -542,7 +542,7 @@ func (nw *network) newEndpointImplHnsV2(cli apipaClient, epInfo *EndpointInfo) ( // deleteEndpointImpl deletes an existing endpoint from the network. 
func (nw *network) deleteEndpointImpl(_ netlink.NetlinkInterface, _ platform.ExecClient, _ EndpointClient, _ netio.NetIOInterface, _ NamespaceClientInterface, - _ ipTablesClient, _ dhcpClient, ep *endpoint, + _ ipTablesClient, _ dhcpClient, ep *endpoint, _ string, ) error { // endpoint deletion is not required for IB if ep.NICType == cns.BackendNIC { @@ -663,7 +663,7 @@ func (epInfo *EndpointInfo) GetEndpointInfoByIPImpl(ipAddresses []net.IPNet, net } } } - return epInfo, errors.Wrapf(err, "No HNSEndpointID matches the IPAddress: "+ipAddresses[0].IP.String()) + return epInfo, errors.Wrapf(err, "No HNSEndpointID matches the IPAddress: %s", ipAddresses[0].IP.String()) } // Get PnP Device ID diff --git a/network/endpoint_windows_test.go b/network/endpoint_windows_test.go index a65add6215..98c572f816 100644 --- a/network/endpoint_windows_test.go +++ b/network/endpoint_windows_test.go @@ -108,7 +108,7 @@ func TestDeleteEndpointImplHnsV2ForIB(t *testing.T) { mockCli := NewMockEndpointClient(nil) err := nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, - netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, &ep) + netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, &ep, "") if err != nil { t.Fatal("endpoint deletion for IB is executed") } @@ -136,7 +136,7 @@ func TestDeleteEndpointImplHnsV2WithEmptyHNSID(t *testing.T) { // should return nil because HnsID is empty mockCli := NewMockEndpointClient(nil) err := nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, - netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, &ep) + netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, &ep, "") if err != nil { t.Fatal("endpoint deletion gets executed") } @@ -565,7 +565,7 @@ func TestCreateAndDeleteEndpointImplHnsv2ForDelegatedHappyPath(t 
*testing.T) { mockCli := NewMockEndpointClient(nil) err = nw.deleteEndpointImpl(netlink.NewMockNetlink(false, ""), platform.NewMockExecClient(false), mockCli, - netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, ep) + netio.NewMockNetIO(false, 0), NewMockNamespaceClient(), iptables.NewClient(), &mockDHCP{}, ep, "") if err != nil { t.Fatalf("Failed to delete endpoint for Delegated NIC due to %v", err) } @@ -668,13 +668,13 @@ func TestDeleteEndpointStateForInfraDelegatedNIC(t *testing.T) { // mock DeleteEndpointStateless() to make sure endpoint and network is deleted from cache // network and endpoint should be deleted from cache for delegatedNIC - err = nm.DeleteEndpointStateless(networkID, delegatedEpInfo) + err = nm.DeleteEndpointStateless(networkID, delegatedEpInfo, "") if err != nil { t.Fatalf("Failed to delete endpoint for delegatedNIC state due to %v", err) } // endpoint should be deleted from cache for delegatedNIC and network is still there - err = nm.DeleteEndpointStateless(infraNetworkID, infraEpInfo) + err = nm.DeleteEndpointStateless(infraNetworkID, infraEpInfo, "") if err != nil { t.Fatalf("Failed to delete endpoint for delegatedNIC state due to %v", err) } diff --git a/network/manager.go b/network/manager.go index bef8858087..040c09ec58 100644 --- a/network/manager.go +++ b/network/manager.go @@ -107,7 +107,7 @@ type NetworkManager interface { CreateEndpoint(client apipaClient, networkID string, epInfo *EndpointInfo) error EndpointCreate(client apipaClient, epInfos []*EndpointInfo) error // TODO: change name - DeleteEndpoint(networkID string, endpointID string, epInfo *EndpointInfo) error + DeleteEndpoint(networkID string, endpointID string, epInfo *EndpointInfo, mode string) error GetEndpointInfo(networkID string, endpointID string) (*EndpointInfo, error) GetAllEndpoints(networkID string) (map[string]*EndpointInfo, error) GetEndpointInfoBasedOnPODDetails(networkID string, podName string, podNameSpace string, 
doExactMatchForPodName bool) (*EndpointInfo, error) @@ -398,7 +398,7 @@ func (nm *networkManager) createEndpoint(cli apipaClient, networkID string, epIn if err != nil { logger.Error("Create endpoint failure", zap.Error(err)) logger.Info("Cleanup resources") - delErr := nw.deleteEndpoint(nm.netlink, nm.plClient, nm.netio, nm.nsClient, nm.iptablesClient, nm.dhcpClient, ep.Id) + delErr := nw.deleteEndpoint(nm.netlink, nm.plClient, nm.netio, nm.nsClient, nm.iptablesClient, nm.dhcpClient, ep.Id, epInfo.Mode) if delErr != nil { logger.Error("Deleting endpoint after create endpoint failure failed with", zap.Error(delErr)) } @@ -486,13 +486,13 @@ func (nm *networkManager) GetEndpointState(networkID, containerID string) ([]*En } // DeleteEndpoint deletes an existing container endpoint. -func (nm *networkManager) DeleteEndpoint(networkID, endpointID string, epInfo *EndpointInfo) error { +func (nm *networkManager) DeleteEndpoint(networkID, endpointID string, epInfo *EndpointInfo, mode string) error { nm.Lock() defer nm.Unlock() if nm.IsStatelessCNIMode() { // Calls deleteEndpointImpl directly, skipping the get network check; does not call cns - return nm.DeleteEndpointStateless(networkID, epInfo) + return nm.DeleteEndpointStateless(networkID, epInfo, mode) } nw, err := nm.getNetwork(networkID) @@ -500,7 +500,7 @@ func (nm *networkManager) DeleteEndpoint(networkID, endpointID string, epInfo *E return err } - err = nw.deleteEndpoint(nm.netlink, nm.plClient, nm.netio, nm.nsClient, nm.iptablesClient, nm.dhcpClient, endpointID) + err = nw.deleteEndpoint(nm.netlink, nm.plClient, nm.netio, nm.nsClient, nm.iptablesClient, nm.dhcpClient, endpointID, mode) if err != nil { return err } @@ -508,7 +508,7 @@ func (nm *networkManager) DeleteEndpoint(networkID, endpointID string, epInfo *E return nil } -func (nm *networkManager) DeleteEndpointStateless(networkID string, epInfo *EndpointInfo) error { +func (nm *networkManager) DeleteEndpointStateless(networkID string, epInfo *EndpointInfo, 
mode string) error { // we want to always use hnsv2 in stateless // hnsv2 is only enabled if NetNs has a valid guid and the hnsv2 api is supported // by passing in a dummy guid, we satisfy the first condition @@ -542,7 +542,7 @@ func (nm *networkManager) DeleteEndpointStateless(networkID string, epInfo *Endp } logger.Info("Deleting endpoint with", zap.String("Endpoint Info: ", epInfo.PrettyString()), zap.String("HNISID : ", ep.HnsId)) - err := nw.deleteEndpointImpl(netlink.NewNetlink(), platform.NewExecClient(logger), nil, nil, nil, nil, nil, ep) + err := nw.deleteEndpointImpl(netlink.NewNetlink(), platform.NewExecClient(logger), nil, nil, nil, nil, nil, ep, mode) if err != nil { return err } diff --git a/network/manager_mock.go b/network/manager_mock.go index 52ba4f3bc4..8337fae47f 100644 --- a/network/manager_mock.go +++ b/network/manager_mock.go @@ -66,7 +66,7 @@ func (nm *MockNetworkManager) CreateEndpoint(_ apipaClient, _ string, epInfo *En } // DeleteEndpoint mock -func (nm *MockNetworkManager) DeleteEndpoint(_, endpointID string, _ *EndpointInfo) error { +func (nm *MockNetworkManager) DeleteEndpoint(_, endpointID string, _ *EndpointInfo, _ string) error { delete(nm.TestEndpointInfoMap, endpointID) return nil } From 65e43f6cac0a5589d320d9d30be8f9ab1fe6c934 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Dec 2025 13:20:14 -0600 Subject: [PATCH 38/47] deps: bump golang.org/x/crypto from 0.41.0 to 0.45.0 in /azure-ipam (#4130) Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.41.0 to 0.45.0. - [Commits](https://github.com/golang/crypto/compare/v0.41.0...v0.45.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-version: 0.45.0 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- azure-ipam/go.mod | 12 ++++++------ azure-ipam/go.sum | 28 ++++++++++++++-------------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/azure-ipam/go.mod b/azure-ipam/go.mod index 3fbf08f579..374ea18094 100644 --- a/azure-ipam/go.mod +++ b/azure-ipam/go.mod @@ -67,14 +67,14 @@ require ( go.opencensus.io v0.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect - golang.org/x/crypto v0.41.0 // indirect + golang.org/x/crypto v0.45.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/net v0.43.0 // indirect + golang.org/x/net v0.47.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sync v0.16.0 // indirect - golang.org/x/sys v0.35.0 // indirect - golang.org/x/term v0.34.0 // indirect - golang.org/x/text v0.28.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect golang.org/x/time v0.12.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect google.golang.org/grpc v1.75.0 // indirect diff --git a/azure-ipam/go.sum b/azure-ipam/go.sum index 8ebe5804a2..e3804a82d9 100644 --- a/azure-ipam/go.sum +++ b/azure-ipam/go.sum @@ -352,8 +352,8 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= -golang.org/x/crypto v0.41.0/go.mod 
h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= @@ -372,8 +372,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= -golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= @@ -382,8 +382,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.16.0 
h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -392,14 +392,14 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= -golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= -golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= -golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= 
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -410,8 +410,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= -golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 593bbaaf016a0f245a9f96f66686de6c3097616e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Dec 2025 13:20:23 -0600 Subject: [PATCH 39/47] deps: bump golang.org/x/crypto from 0.43.0 to 0.45.0 (#4131) Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.43.0 to 0.45.0. - [Commits](https://github.com/golang/crypto/compare/v0.43.0...v0.45.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-version: 0.45.0 dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 12 ++++++------ go.sum | 28 ++++++++++++++-------------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/go.mod b/go.mod index bf07d7f6ac..f2797b7ea5 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/stretchr/testify v1.11.1 go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/sys v0.37.0 + golang.org/x/sys v0.38.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect google.golang.org/grpc v1.76.0 google.golang.org/protobuf v1.36.10 @@ -98,11 +98,11 @@ require ( github.com/vishvananda/netns v0.0.5 go.opencensus.io v0.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.43.0 - golang.org/x/net v0.46.0 + golang.org/x/crypto v0.45.0 + golang.org/x/net v0.47.0 golang.org/x/oauth2 v0.32.0 // indirect - golang.org/x/term v0.36.0 // indirect - golang.org/x/text v0.30.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect golang.org/x/time v0.14.0 golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect @@ -125,7 +125,7 @@ require ( github.com/cilium/cilium v1.15.16 github.com/cilium/ebpf v0.19.0 github.com/jsternberg/zap-logfmt v1.3.0 - golang.org/x/sync v0.17.0 + golang.org/x/sync v0.18.0 gotest.tools/v3 v3.5.2 k8s.io/kubectl v0.34.1 sigs.k8s.io/yaml v1.6.0 diff --git a/go.sum b/go.sum index dbcced8ba9..d7ca1bfb69 100644 --- a/go.sum +++ b/go.sum @@ -443,8 +443,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto 
v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= -golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= @@ -471,8 +471,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= -golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= @@ -483,8 +483,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -508,20 +508,20 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= -golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= -golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text 
v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= -golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -535,8 +535,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= -golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From d7a06bb36a0d6756bd8df440beb88ad16739f7c6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Dec 2025 13:20:32 -0600 Subject: [PATCH 40/47] deps: bump github.com/containernetworking/plugins from 1.8.0 to 1.9.0 in /azure-ipam (#4162) deps: bump github.com/containernetworking/plugins in /azure-ipam Bumps [github.com/containernetworking/plugins](https://github.com/containernetworking/plugins) from 1.8.0 to 1.9.0. - [Release notes](https://github.com/containernetworking/plugins/releases) - [Commits](https://github.com/containernetworking/plugins/compare/v1.8.0...v1.9.0) --- updated-dependencies: - dependency-name: github.com/containernetworking/plugins dependency-version: 1.9.0 dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- azure-ipam/go.mod | 2 +- azure-ipam/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/azure-ipam/go.mod b/azure-ipam/go.mod index 374ea18094..bf19a43fb0 100644 --- a/azure-ipam/go.mod +++ b/azure-ipam/go.mod @@ -5,7 +5,7 @@ go 1.24.2 require ( github.com/Azure/azure-container-networking v1.7.4 github.com/containernetworking/cni v1.3.0 - github.com/containernetworking/plugins v1.8.0 + github.com/containernetworking/plugins v1.9.0 github.com/pkg/errors v0.9.1 github.com/stretchr/testify v1.11.1 go.uber.org/zap v1.27.0 diff --git a/azure-ipam/go.sum b/azure-ipam/go.sum index e3804a82d9..8e3dc17386 100644 --- a/azure-ipam/go.sum +++ b/azure-ipam/go.sum @@ -81,8 +81,8 @@ github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsP github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g= github.com/containernetworking/cni v1.3.0 h1:v6EpN8RznAZj9765HhXQrtXgX+ECGebEYEmnuFjskwo= 
github.com/containernetworking/cni v1.3.0/go.mod h1:Bs8glZjjFfGPHMw6hQu82RUgEPNGEaBb9KS5KtNMnJ4= -github.com/containernetworking/plugins v1.8.0 h1:WjGbV/0UQyo8A4qBsAh6GaDAtu1hevxVxsEuqtBqUFk= -github.com/containernetworking/plugins v1.8.0/go.mod h1:JG3BxoJifxxHBhG3hFyxyhid7JgRVBu/wtooGEvWf1c= +github.com/containernetworking/plugins v1.9.0 h1:Mg3SXBdRGkdXyFC4lcwr6u2ZB2SDeL6LC3U+QrEANuQ= +github.com/containernetworking/plugins v1.9.0/go.mod h1:JG3BxoJifxxHBhG3hFyxyhid7JgRVBu/wtooGEvWf1c= github.com/coreos/go-iptables v0.8.0 h1:MPc2P89IhuVpLI7ETL/2tx3XZ61VeICZjYqDEgNsPRc= github.com/coreos/go-iptables v0.8.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= From 0903a061a7be1f05a09347083ed3a11b1a3ed540 Mon Sep 17 00:00:00 2001 From: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> Date: Tue, 23 Dec 2025 12:24:25 -0800 Subject: [PATCH 41/47] Enable scale tests with 20 pods (#4179) * Enable scale tests with 20 pods * reset pod object delete check intervals. * copilot suggestions for spacing / formatting. * Update .pipelines/swiftv2-long-running/scripts/manage_storage_rbac.sh Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> * Decrease wait time for pod to run from 300 secs to 100 secs. * Set reservation count in pni. * Remove unused field "type" from PNI configuration. * update exponential backoff for blob upload. 
--------- Signed-off-by: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> Co-authored-by: sivakami Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .pipelines/swiftv2-long-running/pipeline.yaml | 5 +- .../scripts/create_aks.sh | 4 +- .../scripts/create_storage.sh | 34 +++-- .../scripts/manage_storage_rbac.sh | 26 +++- .../long-running-pipeline-template.yaml | 34 ++++- hack/aks/Makefile | 2 +- .../pod-with-device-plugin.yaml | 38 +++++ .../podnetworkinstance.yaml | 4 - .../integration/swiftv2/helpers/az_helpers.go | 130 +++++++++++------- .../swiftv2/longRunningCluster/datapath.go | 14 +- .../longRunningCluster/datapath_scale_test.go | 57 ++++---- 11 files changed, 246 insertions(+), 102 deletions(-) create mode 100644 test/integration/manifests/swiftv2/long-running-cluster/pod-with-device-plugin.yaml diff --git a/.pipelines/swiftv2-long-running/pipeline.yaml b/.pipelines/swiftv2-long-running/pipeline.yaml index 042af7634b..71d4ef271c 100644 --- a/.pipelines/swiftv2-long-running/pipeline.yaml +++ b/.pipelines/swiftv2-long-running/pipeline.yaml @@ -2,7 +2,7 @@ trigger: none pr: none schedules: - - cron: "0 */3 * * *" # Every 3 hours at minute 0 + - cron: "0 */3 * * *" displayName: "Run tests every 3 hours" branches: include: @@ -18,14 +18,13 @@ parameters: - name: location displayName: "Deployment Region" type: string - default: "centraluseuap" + default: "eastus2" - name: runSetupStages displayName: "Create New Infrastructure Setup" type: boolean default: false - # Setup-only parameters (only used when runSetupStages=true) - name: resourceGroupName displayName: "Resource Group Name used when Create new Infrastructure Setup is selected" type: string diff --git a/.pipelines/swiftv2-long-running/scripts/create_aks.sh b/.pipelines/swiftv2-long-running/scripts/create_aks.sh index 8cc2802de2..625b17ad3e 100644 --- a/.pipelines/swiftv2-long-running/scripts/create_aks.sh +++ 
b/.pipelines/swiftv2-long-running/scripts/create_aks.sh @@ -9,9 +9,10 @@ VM_SKU_HIGHNIC=$5 DELEGATOR_APP_NAME=$6 DELEGATOR_RG=$7 DELEGATOR_SUB=$8 -DELEGATOR_BASE_URL=${9:-"http://localhost:8080"} +DELEGATOR_BASE_URL=${9:-"http://localhost:8080"} CLUSTER_COUNT=2 +PODS_PER_NODE=7 CLUSTER_PREFIX="aks" @@ -92,6 +93,7 @@ for i in $(seq 1 "$CLUSTER_COUNT"); do AZCLI=az REGION=$LOCATION \ GROUP=$RG \ VM_SIZE=$VM_SKU_HIGHNIC \ + PODS_PER_NODE=$PODS_PER_NODE \ CLUSTER=$CLUSTER_NAME \ SUB=$SUBSCRIPTION_ID diff --git a/.pipelines/swiftv2-long-running/scripts/create_storage.sh b/.pipelines/swiftv2-long-running/scripts/create_storage.sh index 36286c96e8..0ce69e66f0 100644 --- a/.pipelines/swiftv2-long-running/scripts/create_storage.sh +++ b/.pipelines/swiftv2-long-running/scripts/create_storage.sh @@ -46,14 +46,32 @@ for SA in "$SA1" "$SA2"; do && echo "[OK] Container 'test' created in $SA" echo "Uploading test blob to $SA" - az storage blob upload \ - --account-name "$SA" \ - --container-name "test" \ - --name "hello.txt" \ - --data "Hello from Private Endpoint - Storage: $SA" \ - --auth-mode login \ - --overwrite \ - && echo "[OK] Test blob 'hello.txt' uploaded to $SA/test/" + + # Retry blob upload with exponential backoff if RBAC hasn't propagated yet + MAX_RETRIES=5 + SLEEP_TIME=10 + + for i in $(seq 1 $MAX_RETRIES); do + if az storage blob upload \ + --account-name "$SA" \ + --container-name "test" \ + --name "hello.txt" \ + --data "Hello from Private Endpoint - Storage: $SA" \ + --auth-mode login \ + --overwrite 2>&1; then + echo "[OK] Test blob 'hello.txt' uploaded to $SA/test/" + break + else + if [ $i -lt $MAX_RETRIES ]; then + echo "[WARN] Blob upload failed (attempt $i/$MAX_RETRIES). Waiting ${SLEEP_TIME}s for RBAC propagation..." 
+ sleep $SLEEP_TIME + SLEEP_TIME=$((SLEEP_TIME * 2)) + else + echo "[ERROR] Failed to upload blob after $MAX_RETRIES attempts" + exit 1 + fi + fi + done done echo "Removing RBAC role after blob upload" diff --git a/.pipelines/swiftv2-long-running/scripts/manage_storage_rbac.sh b/.pipelines/swiftv2-long-running/scripts/manage_storage_rbac.sh index f7fbd2d30a..df51db8c12 100644 --- a/.pipelines/swiftv2-long-running/scripts/manage_storage_rbac.sh +++ b/.pipelines/swiftv2-long-running/scripts/manage_storage_rbac.sh @@ -37,8 +37,32 @@ if [ "$ACTION" == "assign" ]; then --scope "$SA_SCOPE" \ --output none \ && echo "[OK] Role assigned to service principal for $SA" + + echo "==> Verifying RBAC role propagation by testing SAS token generation" + MAX_RETRIES=10 + RETRY_DELAY=15 + + for attempt in $(seq 1 $MAX_RETRIES); do + echo "Attempt $attempt/$MAX_RETRIES: Testing SAS token generation for $SA..." + if az storage blob generate-sas \ + --account-name "$SA" \ + --container-name "test" \ + --name "hello.txt" \ + --permissions r \ + --expiry $(date -u -d "+1 hour" '+%Y-%m-%dT%H:%MZ') \ + --auth-mode login \ + --as-user \ + -o tsv &>/dev/null; then + echo "RBAC propagation verified! SAS token generation successful." + break + else + echo "RBAC not yet propagated. Waiting ${RETRY_DELAY}s before retry..." 
+ sleep $RETRY_DELAY + fi + done + echo "WARNING: RBAC may not be fully propagated after $(($MAX_RETRIES * $RETRY_DELAY))s" done - + elif [ "$ACTION" == "delete" ]; then echo "Removing Storage Blob Data Contributor role from service principal" diff --git a/.pipelines/swiftv2-long-running/template/long-running-pipeline-template.yaml b/.pipelines/swiftv2-long-running/template/long-running-pipeline-template.yaml index 356495289c..cfb134b20a 100644 --- a/.pipelines/swiftv2-long-running/template/long-running-pipeline-template.yaml +++ b/.pipelines/swiftv2-long-running/template/long-running-pipeline-template.yaml @@ -277,7 +277,7 @@ stages: assign ${{ parameters.subscriptionId }} $(rgName) - "$(DiscoverStorageAccounts.storageAccount1) $(DiscoverStorageAccounts.storageAccount2)" + "$(DiscoverStorageAccounts.storageAccount1)" - task: AzureCLI@2 displayName: "Run Private Endpoint Tests" @@ -312,6 +312,37 @@ stages: ${{ parameters.subscriptionId }} $(rgName) "$(DiscoverStorageAccounts.storageAccount1) $(DiscoverStorageAccounts.storageAccount2)" + + - job: ScaleTest + displayName: "Scale Tests - create and delete pods at scale." 
+ dependsOn: + - PrivateEndpointTests + steps: + - task: DownloadPipelineArtifact@2 + displayName: "Download kubeconfig files" + inputs: + artifactName: kubeconfigs + targetPath: $(Pipeline.Workspace)/kubeconfigs + + - task: AzureCLI@2 + displayName: "Run Scale Test (Create and Delete)" + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + echo "==> Running scale test: Create 20 pods with device plugin across both clusters" + echo " - 10 pods in aks-1 (cx_vnet_v1/s1)" + echo " - 10 pods in aks-2 (cx_vnet_v3/s1)" + export RG="$(rgName)" + export BUILD_ID="$(rgName)" + export WORKLOAD_TYPE="swiftv2-linux" + cd ./test/integration/swiftv2/longRunningCluster + go test -v -timeout 1h -tags=scale_test + workingDirectory: $(System.DefaultWorkingDirectory) + env: + KUBECONFIG: $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig:$(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig + KUBECONFIG_DIR: $(Pipeline.Workspace)/kubeconfigs - job: DeleteTestResources displayName: "Delete PodNetwork, PNI, and Pods" @@ -319,6 +350,7 @@ stages: - CreatePods - ConnectivityTests - PrivateEndpointTests + - ScaleTest condition: always() steps: - task: DownloadPipelineArtifact@2 diff --git a/hack/aks/Makefile b/hack/aks/Makefile index 3b31345ec5..1aaeb464b8 100644 --- a/hack/aks/Makefile +++ b/hack/aks/Makefile @@ -447,7 +447,7 @@ linux-swiftv2-nodepool-up: ## Add linux node pool to swiftv2 cluster --os-type Linux \ --max-pods 250 \ --subscription $(SUB) \ - --tags fastpathenabled=true aks-nic-enable-multi-tenancy=true stampcreatorserviceinfo=true\ + --tags fastpathenabled=true aks-nic-enable-multi-tenancy=true stampcreatorserviceinfo=true aks-nic-secondary-count=${PODS_PER_NODE} \ --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/NetworkingMultiTenancyPreview \ --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet diff --git 
a/test/integration/manifests/swiftv2/long-running-cluster/pod-with-device-plugin.yaml b/test/integration/manifests/swiftv2/long-running-cluster/pod-with-device-plugin.yaml new file mode 100644 index 0000000000..e80f8a89fa --- /dev/null +++ b/test/integration/manifests/swiftv2/long-running-cluster/pod-with-device-plugin.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ .PodName }} + namespace: {{ .Namespace }} + labels: + kubernetes.azure.com/pod-network-instance: {{ .PNIName }} + kubernetes.azure.com/pod-network: {{ .PNName }} +spec: + containers: + - name: net-debugger + image: {{ .Image }} + command: ["/bin/bash", "-c"] + args: + - | + echo "Pod Network Diagnostics started on $(hostname)" + echo "Pod IP: $(hostname -i)" + echo "Starting TCP listener on port 8080" + + # Start netcat listener that responds to connections + while true; do + echo "TCP Connection Success from $(hostname) at $(date)" | nc -l -p 8080 + done + ports: + - containerPort: 8080 + protocol: TCP + resources: + limits: + cpu: 300m + memory: 600Mi + acn.azure.com/vnet-nic: "1" + requests: + cpu: 300m + memory: 600Mi + acn.azure.com/vnet-nic: "1" + securityContext: + privileged: true + restartPolicy: Always \ No newline at end of file diff --git a/test/integration/manifests/swiftv2/long-running-cluster/podnetworkinstance.yaml b/test/integration/manifests/swiftv2/long-running-cluster/podnetworkinstance.yaml index 4d1f8ca384..783b63777e 100644 --- a/test/integration/manifests/swiftv2/long-running-cluster/podnetworkinstance.yaml +++ b/test/integration/manifests/swiftv2/long-running-cluster/podnetworkinstance.yaml @@ -6,8 +6,4 @@ metadata: spec: podNetworkConfigs: - podNetwork: {{ .PNName }} - {{- if eq .Type "explicit" }} podIPReservationSize: {{ .Reservations }} - {{- else }} - podIPReservationSize: 1 - {{- end }} diff --git a/test/integration/swiftv2/helpers/az_helpers.go b/test/integration/swiftv2/helpers/az_helpers.go index 484e66b138..e5d2f05b1f 100644 --- 
a/test/integration/swiftv2/helpers/az_helpers.go +++ b/test/integration/swiftv2/helpers/az_helpers.go @@ -10,16 +10,15 @@ import ( ) var ( - // ErrPodNotRunning is returned when a pod does not reach Running state ErrPodNotRunning = errors.New("pod did not reach Running state") - // ErrPodNoIP is returned when a pod has no IP address assigned ErrPodNoIP = errors.New("pod has no IP address assigned") - // ErrPodNoEth1IP is returned when a pod has no eth1 IP address (delegated subnet not configured) ErrPodNoEth1IP = errors.New("pod has no eth1 IP address (delegated subnet not configured?)") - // ErrPodContainerNotReady is returned when a pod container is not ready ErrPodContainerNotReady = errors.New("pod container not ready") - // ErrMTPNCStuckDeletion is returned when MTPNC resources are stuck and not deleted ErrMTPNCStuckDeletion = errors.New("MTPNC resources should have been deleted but were found") + ErrPodDeletionFailed = errors.New("pod still exists after deletion attempts") + ErrPNIDeletionFailed = errors.New("PodNetworkInstance still exists after deletion attempts") + ErrPNDeletionFailed = errors.New("PodNetwork still exists after deletion attempts") + ErrNamespaceDeletionFailed = errors.New("namespace still exists after deletion attempts") ) func runAzCommand(cmd string, args ...string) (string, error) { @@ -88,8 +87,6 @@ func EnsureNamespaceExists(kubeconfig, namespace string) error { // DeletePod deletes a pod in the specified namespace and waits for it to be fully removed func DeletePod(kubeconfig, namespace, podName string) error { fmt.Printf("Deleting pod %s in namespace %s...\n", podName, namespace) - - // Initiate pod deletion with context timeout ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second) defer cancel() @@ -97,7 +94,7 @@ func DeletePod(kubeconfig, namespace, podName string) error { out, err := cmd.CombinedOutput() if err != nil { if errors.Is(ctx.Err(), context.DeadlineExceeded) { - fmt.Printf("kubectl delete pod command 
timed out after 90s, attempting force delete...\n") + fmt.Printf("Warning: kubectl delete pod command timed out after 90s\n") } else { return fmt.Errorf("failed to delete pod %s in namespace %s: %w\nOutput: %s", podName, namespace, err, string(out)) } @@ -106,14 +103,13 @@ func DeletePod(kubeconfig, namespace, podName string) error { // Wait for pod to be completely gone (critical for IP release) fmt.Printf("Waiting for pod %s to be fully removed...\n", podName) for attempt := 1; attempt <= 30; attempt++ { - checkCtx, checkCancel := context.WithTimeout(context.Background(), 10*time.Second) + checkCtx, checkCancel := context.WithTimeout(context.Background(), 20*time.Second) checkCmd := exec.CommandContext(checkCtx, "kubectl", "--kubeconfig", kubeconfig, "get", "pod", podName, "-n", namespace, "--ignore-not-found=true", "-o", "name") checkOut, _ := checkCmd.CombinedOutput() checkCancel() if strings.TrimSpace(string(checkOut)) == "" { fmt.Printf("Pod %s fully removed after %d seconds\n", podName, attempt*2) - // Extra wait to ensure IP reservation is released in DNC time.Sleep(5 * time.Second) return nil } @@ -124,39 +120,31 @@ func DeletePod(kubeconfig, namespace, podName string) error { time.Sleep(2 * time.Second) } - // If pod still exists after 60 seconds, force delete - fmt.Printf("Pod %s still exists after 60s, attempting force delete...\n", podName) - ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - forceCmd := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeconfig, "delete", "pod", podName, "-n", namespace, "--grace-period=0", "--force", "--ignore-not-found=true") - forceOut, forceErr := forceCmd.CombinedOutput() - if forceErr != nil { - fmt.Printf("Warning: Force delete failed: %s\n%s\n", forceErr, string(forceOut)) - } - - // Wait a bit more for force delete to complete - time.Sleep(10 * time.Second) - fmt.Printf("Pod %s deletion completed (may have required force)\n", podName) - return nil + return 
fmt.Errorf("%w: pod %s still exists", ErrPodDeletionFailed, podName) } -// DeletePodNetworkInstance deletes a PodNetworkInstance and waits for it to be removed func DeletePodNetworkInstance(kubeconfig, namespace, pniName string) error { fmt.Printf("Deleting PodNetworkInstance %s in namespace %s...\n", pniName, namespace) - // Initiate PNI deletion - cmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "delete", "podnetworkinstance", pniName, "-n", namespace, "--ignore-not-found=true") + ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second) + defer cancel() + + cmd := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeconfig, "delete", "podnetworkinstance", pniName, "-n", namespace, "--ignore-not-found=true") out, err := cmd.CombinedOutput() if err != nil { - return fmt.Errorf("failed to delete PodNetworkInstance %s: %w\nOutput: %s", pniName, err, string(out)) + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + fmt.Printf("Warning: kubectl delete PNI command timed out after 90s\n") + } else { + return fmt.Errorf("failed to delete PodNetworkInstance %s: %w\nOutput: %s", pniName, err, string(out)) + } } - // Wait for PNI to be completely gone (it may take time for DNC to release reservations) fmt.Printf("Waiting for PodNetworkInstance %s to be fully removed...\n", pniName) - for attempt := 1; attempt <= 60; attempt++ { - checkCmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "get", "podnetworkinstance", pniName, "-n", namespace, "--ignore-not-found=true", "-o", "name") + for attempt := 1; attempt <= 30; attempt++ { + checkCtx, checkCancel := context.WithTimeout(context.Background(), 20*time.Second) + checkCmd := exec.CommandContext(checkCtx, "kubectl", "--kubeconfig", kubeconfig, "get", "podnetworkinstance", pniName, "-n", namespace, "--ignore-not-found=true", "-o", "name") checkOut, _ := checkCmd.CombinedOutput() + checkCancel() if strings.TrimSpace(string(checkOut)) == "" { fmt.Printf("PodNetworkInstance %s fully removed 
after %d seconds\n", pniName, attempt*2) @@ -164,22 +152,19 @@ func DeletePodNetworkInstance(kubeconfig, namespace, pniName string) error { } if attempt%10 == 0 { - // Check for ReservationInUse errors descCmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "describe", "podnetworkinstance", pniName, "-n", namespace) descOut, _ := descCmd.CombinedOutput() descStr := string(descOut) - if strings.Contains(descStr, "ReservationInUse") { - fmt.Printf("PNI %s still has active reservations (attempt %d/60). Waiting for DNC to release...\n", pniName, attempt) + fmt.Printf("PNI %s still has active reservations (attempt %d/30). Waiting for DNC to release...\n", pniName, attempt) } else { - fmt.Printf("PNI %s still terminating (attempt %d/60)...\n", pniName, attempt) + fmt.Printf("PNI %s still terminating (attempt %d/30)...\n", pniName, attempt) } } time.Sleep(2 * time.Second) } - // If PNI still exists after 120 seconds, try to remove finalizers - fmt.Printf("PNI %s still exists after 120s, attempting to remove finalizers...\n", pniName) + fmt.Printf("PNI %s still exists, attempting to remove finalizers...\n", pniName) patchCmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "patch", "podnetworkinstance", pniName, "-n", namespace, "-p", `{"metadata":{"finalizers":[]}}`, "--type=merge") patchOut, patchErr := patchCmd.CombinedOutput() if patchErr != nil { @@ -189,25 +174,41 @@ func DeletePodNetworkInstance(kubeconfig, namespace, pniName string) error { time.Sleep(5 * time.Second) } + checkCtx, checkCancel := context.WithTimeout(context.Background(), 10*time.Second) + checkCmd := exec.CommandContext(checkCtx, "kubectl", "--kubeconfig", kubeconfig, "get", "podnetworkinstance", pniName, "-n", namespace, "--ignore-not-found=true", "-o", "name") + checkOut, _ := checkCmd.CombinedOutput() + checkCancel() + if strings.TrimSpace(string(checkOut)) != "" { + return fmt.Errorf("%w: PodNetworkInstance %s in namespace %s", ErrPNIDeletionFailed, pniName, namespace) + } + 
fmt.Printf("PodNetworkInstance %s deletion completed\n", pniName) return nil } -// DeletePodNetwork deletes a PodNetwork and waits for it to be removed func DeletePodNetwork(kubeconfig, pnName string) error { fmt.Printf("Deleting PodNetwork %s...\n", pnName) - cmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "delete", "podnetwork", pnName, "--ignore-not-found=true") + ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second) + defer cancel() + + cmd := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeconfig, "delete", "podnetwork", pnName, "--ignore-not-found=true") out, err := cmd.CombinedOutput() if err != nil { - return fmt.Errorf("failed to delete PodNetwork %s: %w\nOutput: %s", pnName, err, string(out)) + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + fmt.Printf("Warning: kubectl delete PN command timed out after 90s\n") + } else { + return fmt.Errorf("failed to delete PodNetwork %s: %w\nOutput: %s", pnName, err, string(out)) + } } // Wait for PN to be completely gone fmt.Printf("Waiting for PodNetwork %s to be fully removed...\n", pnName) for attempt := 1; attempt <= 30; attempt++ { - checkCmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "get", "podnetwork", pnName, "--ignore-not-found=true", "-o", "name") + checkCtx, checkCancel := context.WithTimeout(context.Background(), 20*time.Second) + checkCmd := exec.CommandContext(checkCtx, "kubectl", "--kubeconfig", kubeconfig, "get", "podnetwork", pnName, "--ignore-not-found=true", "-o", "name") checkOut, _ := checkCmd.CombinedOutput() + checkCancel() if strings.TrimSpace(string(checkOut)) == "" { fmt.Printf("PodNetwork %s fully removed after %d seconds\n", pnName, attempt*2) @@ -229,6 +230,15 @@ func DeletePodNetwork(kubeconfig, pnName string) error { } time.Sleep(5 * time.Second) + checkCtx, checkCancel := context.WithTimeout(context.Background(), 10*time.Second) + checkCmd := exec.CommandContext(checkCtx, "kubectl", "--kubeconfig", kubeconfig, "get", "podnetwork", 
pnName, "--ignore-not-found=true", "-o", "name") + checkOut, _ := checkCmd.CombinedOutput() + checkCancel() + + if strings.TrimSpace(string(checkOut)) != "" { + return fmt.Errorf("%w: PodNetwork %s", ErrPNDeletionFailed, pnName) + } + fmt.Printf("PodNetwork %s deletion completed\n", pnName) return nil } @@ -237,25 +247,34 @@ func DeletePodNetwork(kubeconfig, pnName string) error { func DeleteNamespace(kubeconfig, namespace string) error { fmt.Printf("Deleting namespace %s...\n", namespace) - cmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "delete", "namespace", namespace, "--ignore-not-found=true") + ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second) + defer cancel() + + cmd := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeconfig, "delete", "namespace", namespace, "--ignore-not-found=true") out, err := cmd.CombinedOutput() if err != nil { - return fmt.Errorf("failed to delete namespace %s: %w\nOutput: %s", namespace, err, string(out)) + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + fmt.Printf("Warning: kubectl delete namespace command timed out after 90s\n") + } else { + return fmt.Errorf("failed to delete namespace %s: %w\nOutput: %s", namespace, err, string(out)) + } } // Wait for namespace to be completely gone fmt.Printf("Waiting for namespace %s to be fully removed...\n", namespace) - for attempt := 1; attempt <= 60; attempt++ { - checkCmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "get", "namespace", namespace, "--ignore-not-found=true", "-o", "name") + for attempt := 1; attempt <= 30; attempt++ { + checkCtx, checkCancel := context.WithTimeout(context.Background(), 20*time.Second) + checkCmd := exec.CommandContext(checkCtx, "kubectl", "--kubeconfig", kubeconfig, "get", "namespace", namespace, "--ignore-not-found=true", "-o", "name") checkOut, _ := checkCmd.CombinedOutput() + checkCancel() if strings.TrimSpace(string(checkOut)) == "" { fmt.Printf("Namespace %s fully removed after %d seconds\n", 
namespace, attempt*2) return nil } - if attempt%15 == 0 { - fmt.Printf("Namespace %s still terminating (attempt %d/60)...\n", namespace, attempt) + if attempt%10 == 0 { + fmt.Printf("Namespace %s still terminating (attempt %d/30)...\n", namespace, attempt) } time.Sleep(2 * time.Second) } @@ -269,6 +288,17 @@ func DeleteNamespace(kubeconfig, namespace string) error { } time.Sleep(5 * time.Second) + + // Verify namespace is actually gone + checkCtx, checkCancel := context.WithTimeout(context.Background(), 10*time.Second) + checkCmd := exec.CommandContext(checkCtx, "kubectl", "--kubeconfig", kubeconfig, "get", "namespace", namespace, "--ignore-not-found=true", "-o", "name") + checkOut, _ := checkCmd.CombinedOutput() + checkCancel() + + if strings.TrimSpace(string(checkOut)) != "" { + return fmt.Errorf("%w: namespace %s", ErrNamespaceDeletionFailed, namespace) + } + fmt.Printf("Namespace %s deletion completed\n", namespace) return nil } @@ -376,22 +406,18 @@ func VerifyNoMTPNC(kubeconfig, buildID string) error { cmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "get", "mtpnc", "-A", "-o", "json") out, err := cmd.CombinedOutput() if err != nil { - // If MTPNC CRD doesn't exist, that's fine if strings.Contains(string(out), "the server doesn't have a resource type") { return nil } return fmt.Errorf("failed to get MTPNC resources: %w\nOutput: %s", err, string(out)) } - // Parse JSON to check for any MTPNC resources matching our build ID output := string(out) if strings.Contains(output, buildID) { - // Extract MTPNC names for better error reporting lines := strings.Split(output, "\n") var mtpncNames []string for _, line := range lines { if strings.Contains(line, buildID) && strings.Contains(line, "\"name\":") { - // Basic extraction - could be improved with proper JSON parsing mtpncNames = append(mtpncNames, line) } } diff --git a/test/integration/swiftv2/longRunningCluster/datapath.go b/test/integration/swiftv2/longRunningCluster/datapath.go index 
67836a74cb..e37aafa163 100644 --- a/test/integration/swiftv2/longRunningCluster/datapath.go +++ b/test/integration/swiftv2/longRunningCluster/datapath.go @@ -75,7 +75,6 @@ type PNIData struct { PNIName string PNName string Namespace string - Type string Reservations int } @@ -109,6 +108,8 @@ type TestResources struct { PNITemplate string PodTemplate string PodImage string + Reservations int + Namespace string } type PodScenario struct { @@ -234,12 +235,15 @@ func CreateNamespaceResource(kubeconfig, namespace string) error { } func CreatePodNetworkInstanceResource(resources TestResources) error { + namespace := resources.Namespace + if namespace == "" { + namespace = resources.PNName + } err := CreatePodNetworkInstance(resources.Kubeconfig, PNIData{ PNIName: resources.PNIName, PNName: resources.PNName, - Namespace: resources.PNName, - Type: "explicit", - Reservations: 2, + Namespace: namespace, + Reservations: resources.Reservations, }, resources.PNITemplate) if err != nil { return fmt.Errorf("failed to create PodNetworkInstance: %w", err) @@ -327,6 +331,7 @@ func CreateScenarioResources(scenario PodScenario, testScenarios TestScenarios) PNITemplate: "../../manifests/swiftv2/long-running-cluster/podnetworkinstance.yaml", PodTemplate: "../../manifests/swiftv2/long-running-cluster/pod.yaml", PodImage: testScenarios.PodImage, + Reservations: 2, } // Step 1: Create PodNetwork @@ -364,7 +369,6 @@ func CreateScenarioResources(scenario PodScenario, testScenarios TestScenarios) if len(nodeInfo.LowNicNodes) == 0 { return fmt.Errorf("%w: scenario %s", ErrNoLowNICNodes, scenario.Name) } - // Find first unused node in the pool (low-NIC nodes can only handle one pod) targetNode = "" for _, node := range nodeInfo.LowNicNodes { if !testScenarios.UsedNodes[node] { diff --git a/test/integration/swiftv2/longRunningCluster/datapath_scale_test.go b/test/integration/swiftv2/longRunningCluster/datapath_scale_test.go index dcb71af15b..8e94fc1c7b 100644 --- 
a/test/integration/swiftv2/longRunningCluster/datapath_scale_test.go +++ b/test/integration/swiftv2/longRunningCluster/datapath_scale_test.go @@ -18,6 +18,8 @@ import ( func TestDatapathScale(t *testing.T) { gomega.RegisterFailHandler(ginkgo.Fail) + gomega.SetDefaultEventuallyTimeout(50 * time.Minute) + gomega.SetDefaultEventuallyPollingInterval(5 * time.Second) ginkgo.RunSpecs(t, "Datapath Scale Suite") } @@ -29,25 +31,18 @@ var _ = ginkgo.Describe("Datapath Scale Tests", func() { ginkgo.Fail(fmt.Sprintf("Missing required environment variables: RG='%s', BUILD_ID='%s'", rg, buildId)) } - ginkgo.It("creates and deletes 15 pods in a burst using device plugin", func() { - // NOTE: Maximum pods per PodNetwork/PodNetworkInstance is limited by: - // 1. Subnet IP address capacity - // 2. Node capacity (typically 250 pods per node) - // 3. Available NICs on nodes (device plugin resources) - // For this test: Creating 15 pods across aks-1 and aks-2 + ginkgo.It("creates and deletes 20 pods in a burst using device plugin", func() { // Device plugin and Kubernetes scheduler automatically place pods on nodes with available NICs - - // Define scenarios for both clusters - 8 pods on aks-1, 7 pods on aks-2 (15 total for testing) - // IMPORTANT: Reuse existing PodNetworks from connectivity tests to avoid "duplicate podnetwork with same network id" error + // Define scenarios for both clusters - 10 pods on aks-1, 10 pods on aks-2 (20 total for testing) scenarios := []struct { cluster string vnetName string subnet string podCount int }{ - {cluster: "aks-1", vnetName: "cx_vnet_v1", subnet: "s1", podCount: 8}, - {cluster: "aks-2", vnetName: "cx_vnet_v3", subnet: "s1", podCount: 7}, - } // Initialize test scenarios with cache + {cluster: "aks-1", vnetName: "cx_vnet_v1", subnet: "s1", podCount: 10}, + {cluster: "aks-2", vnetName: "cx_vnet_v3", subnet: "s1", podCount: 10}, + } testScenarios := TestScenarios{ ResourceGroup: rg, BuildID: buildId, @@ -73,9 +68,8 @@ var _ = 
ginkgo.Describe("Datapath Scale Tests", func() { resources := TestResources{ Kubeconfig: kubeconfig, - PNName: pnName, // References the shared PodNetwork (also the namespace) - PNIName: pniName, // New PNI for scale test - Namespace: pnName, // Same as PN namespace + PNName: pnName, + PNIName: pniName, VnetGUID: netInfo.VnetGUID, SubnetGUID: netInfo.SubnetGUID, SubnetARMID: netInfo.SubnetARMID, @@ -84,7 +78,7 @@ var _ = ginkgo.Describe("Datapath Scale Tests", func() { PNITemplate: "../../manifests/swiftv2/long-running-cluster/podnetworkinstance.yaml", PodTemplate: "../../manifests/swiftv2/long-running-cluster/pod-with-device-plugin.yaml", PodImage: testScenarios.PodImage, - Reservations: 20, // Reserve 20 IPs for scale test pods + Reservations: scenario.podCount, } ginkgo.By(fmt.Sprintf("Reusing existing PodNetwork: %s in cluster %s", pnName, scenario.cluster)) @@ -100,7 +94,7 @@ var _ = ginkgo.Describe("Datapath Scale Tests", func() { for _, s := range scenarios { totalPods += s.podCount } - ginkgo.By(fmt.Sprintf("Creating %d pods in burst (auto-scheduled by device plugin)", totalPods)) + ginkgo.By(fmt.Sprintf("Creating %d pods in burst", totalPods)) var wg sync.WaitGroup errors := make(chan error, totalPods) @@ -116,7 +110,6 @@ var _ = ginkgo.Describe("Datapath Scale Tests", func() { podName := fmt.Sprintf("scale-pod-%d", idx) ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s in cluster %s (auto-scheduled)", podName, resources.PNName, cluster)) - // Create pod without specifying node - let device plugin and scheduler decide err := CreatePod(resources.Kubeconfig, PodData{ PodName: podName, NodeName: "", @@ -130,10 +123,9 @@ var _ = ginkgo.Describe("Datapath Scale Tests", func() { errors <- fmt.Errorf("failed to create pod %s in cluster %s: %w", podName, cluster, err) return } - - err = helpers.WaitForPodScheduled(resources.Kubeconfig, resources.PNName, podName, 10, 6) + err = helpers.WaitForPodRunning(resources.Kubeconfig, resources.PNName, podName, 10, 10) 
if err != nil { - errors <- fmt.Errorf("pod %s in cluster %s was not scheduled: %w", podName, cluster, err) + errors <- fmt.Errorf("pod %s in cluster %s did not reach running state: %w", podName, cluster, err) } }(allResources[i], scenario.cluster, podIndex) podIndex++ @@ -142,7 +134,6 @@ var _ = ginkgo.Describe("Datapath Scale Tests", func() { wg.Wait() close(errors) - elapsedTime := time.Since(startTime) var errList []error for err := range errors { @@ -150,21 +141,29 @@ var _ = ginkgo.Describe("Datapath Scale Tests", func() { } gomega.Expect(errList).To(gomega.BeEmpty(), "Some pods failed to create") ginkgo.By(fmt.Sprintf("Successfully created %d pods in %s", totalPods, elapsedTime)) - ginkgo.By("Waiting 30 seconds for pods to stabilize") - time.Sleep(30 * time.Second) + ginkgo.By("Waiting 10 seconds for pods to stabilize") + time.Sleep(10 * time.Second) ginkgo.By("Verifying all pods are in Running state") podIndex = 0 + var verificationErrors []error for i, scenario := range scenarios { for j := 0; j < scenario.podCount; j++ { podName := fmt.Sprintf("scale-pod-%d", podIndex) err := helpers.WaitForPodRunning(allResources[i].Kubeconfig, allResources[i].PNName, podName, 5, 10) - gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("Pod %s did not reach running state in cluster %s", podName, scenario.cluster)) + if err != nil { + verificationErrors = append(verificationErrors, fmt.Errorf("pod %s did not reach running state in cluster %s: %w", podName, scenario.cluster, err)) + } podIndex++ } } - ginkgo.By(fmt.Sprintf("All %d pods are running successfully across both clusters", totalPods)) + if len(verificationErrors) == 0 { + ginkgo.By(fmt.Sprintf("All %d pods are running successfully across both clusters", totalPods)) + } else { + ginkgo.By(fmt.Sprintf("WARNING: %d pods failed to reach running state, proceeding to cleanup", len(verificationErrors))) + } + ginkgo.By("Cleaning up scale test resources") podIndex = 0 for i, scenario := range scenarios { @@ -190,5 +189,11 
@@ var _ = ginkgo.Describe("Datapath Scale Tests", func() { } ginkgo.By("Scale test cleanup completed") + if len(verificationErrors) > 0 { + for _, err := range verificationErrors { + fmt.Printf("Error: %v\n", err) + } + gomega.Expect(verificationErrors).To(gomega.BeEmpty(), fmt.Sprintf("%d pods failed to reach running state", len(verificationErrors))) + } }) }) From bebad17b781ffc92552fdc6382acdc6d5a31ad6f Mon Sep 17 00:00:00 2001 From: John Payne <89417863+jpayne3506@users.noreply.github.com> Date: Mon, 29 Dec 2025 13:34:48 -0600 Subject: [PATCH 42/47] ci: General Cilium Nightly fixes (#4175) * ci: update cilium nightly config * ci: update namespace check * chore: bump timeout * ci: added ztunnel rbac to nightly operator * fix: use ACN build pool for cluster create * ci: add nightly check template --- .../cni/cilium/nightly-release-test.yml | 12 ++- ...um-dualstackoverlay-e2e-step-template.yaml | 12 +-- .../cilium-dualstackoverlay-e2e.steps.yaml | 15 +--- .../cilium-overlay-e2e-step-template.yaml | 64 +++++---------- .../cilium-overlay-e2e.steps.yaml | 68 +++++----------- .../cilium-overlay-e2e-step-template.yaml | 78 ++++++------------- .../cilium-overlay-e2e.steps.yaml | 78 ++++++------------- .../templates/cilium-nightly-checks.yaml | 50 ++++++++++++ .../cilium/cilium-nightly-config.yaml | 17 ++-- .../cilium/cilium-nightly-operator/role.yaml | 24 ++++++ .../cilium-nightly-operator/rolebinding.yaml | 15 ++++ 11 files changed, 198 insertions(+), 235 deletions(-) create mode 100644 .pipelines/templates/cilium-nightly-checks.yaml create mode 100644 test/integration/manifests/cilium/cilium-nightly-operator/role.yaml create mode 100644 test/integration/manifests/cilium/cilium-nightly-operator/rolebinding.yaml diff --git a/.pipelines/cni/cilium/nightly-release-test.yml b/.pipelines/cni/cilium/nightly-release-test.yml index aeb31c803e..34f18996f5 100644 --- a/.pipelines/cni/cilium/nightly-release-test.yml +++ b/.pipelines/cni/cilium/nightly-release-test.yml @@ -35,7 
+35,7 @@ stages: steps: - bash: | set -ex - + cd .pipelines/ git clone https://github.com/cilium/cilium.git cd cilium @@ -65,10 +65,10 @@ stages: fi if [ "$(type)" = "docker-operator-generic-image" ]; then - # Apply patch to Dockerfile + # Apply patch to Dockerfile DOCKERFILE_PATH="./images/$(directory)/Dockerfile" echo "Patching Dockerfile: $DOCKERFILE_PATH" - + # Add ARG and ENV statements to disable systemcrypto for Microsoft Go sed -i '/^FROM.*builder/a ARG GOEXPERIMENT=boringcrypto \nENV GOEXPERIMENT=${GOEXPERIMENT}' "$DOCKERFILE_PATH" fi @@ -76,7 +76,7 @@ stages: BUILD_ARGS=${GO_ARGS}${ALPINE_ARGS} DOCKER_FLAGS="$BUILD_ARGS" \ make $(type) - + name: BuildCiliumImage displayName: "Build Cilium Image" - task: AzureCLI@2 @@ -117,6 +117,8 @@ stages: GOBIN: "$(GOPATH)/bin" # Go binaries path modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + pool: + name: $(BUILD_POOL_NAME_DEFAULT) jobs: - template: ../../templates/create-cluster.yaml parameters: @@ -134,6 +136,7 @@ stages: - cilium_overlay_nightly pool: name: $(BUILD_POOL_NAME_DEFAULT) + timeoutInMinutes: 120 steps: - template: ../../singletenancy/cilium-overlay/cilium-overlay-e2e-step-template.yaml parameters: @@ -141,6 +144,7 @@ stages: clusterName: ciliumnightly-$(commitID) testHubble: true testLRP: true + nightly: true - template: ../../cni/k8s-e2e/k8s-e2e-job-template.yaml parameters: diff --git a/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e-step-template.yaml b/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e-step-template.yaml index 9ef039db57..afa332f451 100644 --- a/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e-step-template.yaml +++ b/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e-step-template.yaml @@ -109,13 +109,6 @@ steps: - template: 
../../templates/cilium-connectivity-tests.yaml - - script: | - ns=`kubectl get ns | grep cilium-test | awk '{print $1}'` - echo "##vso[task.setvariable variable=ciliumNamespace]$ns" - retryCountOnTaskFailure: 3 - name: "nsCapture" - displayName: "Capture Connectivity Test Namespace" - - script: | set -e kubectl get po -owide -A @@ -134,7 +127,10 @@ steps: cd test/integration/load CNI_TYPE=cilium_dualstack go test -timeout 30m -tags load -run ^TestValidateState$ echo "delete cilium connectivity test resources and re-validate state" - kubectl delete ns $(ciliumNamespace) + ciliumNamespace=`kubectl get ns | grep cilium-test | awk '{print $1}'` + for namespace in ${ciliumNamespace}; do + kubectl delete ns ${namespace} + done kubectl get pod -owide -A CNI_TYPE=cilium_dualstack go test -timeout 30m -tags load -run ^TestValidateState$ name: "validatePods" diff --git a/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e.steps.yaml b/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e.steps.yaml index db342405e1..ef356ba9b9 100644 --- a/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e.steps.yaml +++ b/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e.steps.yaml @@ -118,13 +118,6 @@ steps: - template: ../../templates/cilium-connectivity-tests.yaml - - script: | - ns=`kubectl get ns | grep cilium-test | awk '{print $1}'` - echo "##vso[task.setvariable variable=ciliumNamespace]$ns" - retryCountOnTaskFailure: 3 - name: "nsCapture" - displayName: "Capture Connectivity Test Namespace" - - script: | set -e kubectl get po -owide -A @@ -140,13 +133,13 @@ steps: - script: | echo "validate pod IP assignment and check systemd-networkd restart" kubectl get pod -owide -A - # Deleting echo-external-node deployment until cilium version matches TODO. https://github.com/cilium/cilium-cli/issues/67 is addressing the change. 
- # Saves 17 minutes - kubectl delete deploy -n $(ciliumNamespace) echo-external-node cd test/integration/load CNI_TYPE=cilium_dualstack go test -timeout 30m -tags load -run ^TestValidateState$ echo "delete cilium connectivity test resources and re-validate state" - kubectl delete ns $(ciliumNamespace) + ciliumNamespace=`kubectl get ns | grep cilium-test | awk '{print $1}'` + for namespace in ${ciliumNamespace}; do + kubectl delete ns ${namespace} + done kubectl get pod -owide -A CNI_TYPE=cilium_dualstack go test -timeout 30m -tags load -run ^TestValidateState$ name: "validatePods" diff --git a/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e-step-template.yaml b/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e-step-template.yaml index 3ef28746c8..2e5b6e911e 100644 --- a/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e-step-template.yaml +++ b/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e-step-template.yaml @@ -3,6 +3,7 @@ parameters: clusterName: "" testHubble: false scaleup: "" + nightly: false steps: - bash: | @@ -113,13 +114,6 @@ steps: - template: ../../templates/cilium-connectivity-tests.yaml - - script: | - ns=`kubectl get ns | grep cilium-test | awk '{print $1}'` - echo "##vso[task.setvariable variable=ciliumNamespace]$ns" - retryCountOnTaskFailure: 3 - name: "nsCapture" - displayName: "Capture Connectivity Test Namespace" - - ${{ if eq( parameters['testHubble'], true) }}: - script: | export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG} @@ -136,46 +130,26 @@ steps: name: "HubbleConnectivityTests" displayName: "Run Hubble Connectivity Tests" - - script: | - set -e - echo "validate pod IP assignment and check systemd-networkd restart" - kubectl get pod -owide -A - - if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then - echo "Check cilium identities in $(ciliumNamespace) namepsace during nightly run" - echo "expect the identities to be deleted when the namespace 
is deleted" - kubectl get ciliumidentity | grep cilium-test - fi - make test-validate-state - name: "validatePods" - displayName: "Validate Pods" + - ${{if eq( parameters['nightly'], true) }}: + - template: ../../templates/cilium-nightly-checks.yaml + - ${{else }}: + - script: | + set -e + echo "validate pod IP assignment and check systemd-networkd restart" + kubectl get pod -owide -A + ciliumNamespace=`kubectl get ns | grep cilium-test | awk '{print $1}'` - - script: | - if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then - kubectl get pod -owide -n $(ciliumNamespace) - echo "wait for pod and cilium identity deletion in cilium-test namespace" - while true; do - pods=$(kubectl get pods -n $(ciliumNamespace) --no-headers=true 2>/dev/null) - if [[ -z "$pods" ]]; then - echo "No pods found" - break - fi - sleep 2s + make test-validate-state + + echo "delete cilium connectivity test resources and re-validate state" + for namespace in ${ciliumNamespace}; do + kubectl delete ns ${namespace} done - sleep 20s - echo "Verify cilium identities are deleted from cilium-test" - checkIdentity="$(kubectl get ciliumidentity -o json | grep cilium-test | jq -e 'length == 0')" - if [[ -n $checkIdentity ]]; then - echo "##[error]Cilium Identities still present in $(ciliumNamespace) namespace" - exit 1 - else - printf -- "Identities deleted from $(ciliumNamespace) namespace\n" - fi - else - echo "skip cilium identities check for PR pipeline" - fi - name: "CiliumIdentities" - displayName: "Verify Cilium Identities Deletion" + kubectl get pod -owide -A + + make test-validate-state + name: "validatePods" + displayName: "Validate Pods" - script: | echo "Run wireserver and metadata connectivity Tests" diff --git a/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.steps.yaml b/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.steps.yaml index 950fa5fe1b..f6afd59698 100644 --- 
a/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.steps.yaml +++ b/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.steps.yaml @@ -3,6 +3,7 @@ parameters: clusterName: "" testHubble: false scaleup: "" + nightly: false steps: @@ -111,13 +112,6 @@ steps: - template: ../../templates/cilium-connectivity-tests.yaml - - script: | - ns=`kubectl get ns | grep cilium-test | awk '{print $1}'` - echo "##vso[task.setvariable variable=ciliumNamespace]$ns" - retryCountOnTaskFailure: 3 - name: "nsCapture" - displayName: "Capture Connectivity Test Namespace" - - ${{ if eq( parameters['testHubble'], true) }}: - script: | echo "enable Hubble metrics server" @@ -135,50 +129,26 @@ steps: name: "HubbleConnectivityTests" displayName: "Run Hubble Connectivity Tests" - - script: | - set -e - echo "validate pod IP assignment and check systemd-networkd restart" - kubectl get pod -owide -A - - if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then - echo "Check cilium identities in $(ciliumNamespace) namepsace during nightly run" - echo "expect the identities to be deleted when the namespace is deleted" - kubectl get ciliumidentity | grep cilium-test - fi - make test-validate-state - echo "delete cilium connectivity test resources and re-validate state" # TODO Delete this and the next 4 lines if connectivity no longer has bug - kubectl delete ns $(ciliumNamespace) - kubectl get pod -owide -A - make test-validate-state - name: "validatePods" - displayName: "Validate Pods" + - ${{if eq( parameters['nightly'], true) }}: + - template: ../../templates/cilium-nightly-checks.yaml + - ${{else }}: + - script: | + set -e + echo "validate pod IP assignment and check systemd-networkd restart" + kubectl get pod -owide -A + ciliumNamespace=`kubectl get ns | grep cilium-test | awk '{print $1}'` - - script: | - if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then - kubectl get pod -owide -n $(ciliumNamespace) - echo "wait for pod and cilium 
identity deletion in cilium-test namespace" - while true; do - pods=$(kubectl get pods -n $(ciliumNamespace) --no-headers=true 2>/dev/null) - if [[ -z "$pods" ]]; then - echo "No pods found" - break - fi - sleep 2s + make test-validate-state + + echo "delete cilium connectivity test resources and re-validate state" + for namespace in ${ciliumNamespace}; do + kubectl delete ns ${namespace} done - sleep 20s - echo "Verify cilium identities are deleted from cilium-test" - checkIdentity="$(kubectl get ciliumidentity -o json | grep cilium-test | jq -e 'length == 0')" - if [[ -n $checkIdentity ]]; then - echo "##[error]Cilium Identities still present in $(ciliumNamespace) namespace" - exit 1 - else - printf -- "Identities deleted from $(ciliumNamespace) namespace\n" - fi - else - echo "skip cilium identities check for PR pipeline" - fi - name: "CiliumIdentities" - displayName: "Verify Cilium Identities Deletion" + kubectl get pod -owide -A + + make test-validate-state + name: "validatePods" + displayName: "Validate Pods" - script: | echo "Run wireserver and metadata connectivity Tests" diff --git a/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e-step-template.yaml b/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e-step-template.yaml index c620cdfb29..2456b17192 100644 --- a/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e-step-template.yaml +++ b/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e-step-template.yaml @@ -4,6 +4,7 @@ parameters: testHubble: false testLRP: false scaleup: "" + nightly: false steps: @@ -146,13 +147,6 @@ steps: - template: ../../templates/cilium-connectivity-tests.yaml - - script: | - ns=`kubectl get ns | grep cilium-test | awk '{print $1}'` - echo "##vso[task.setvariable variable=ciliumNamespace]$ns" - retryCountOnTaskFailure: 3 - name: "nsCapture" - displayName: "Capture Connectivity Test Namespace" - - ${{ if eq( parameters['testHubble'], true) }}: - script: | echo "enable Hubble metrics server" @@ 
-171,49 +165,26 @@ steps: name: "HubbleConnectivityTests" displayName: "Run Hubble Connectivity Tests" - - script: | - set -e - echo "validate pod IP assignment and check systemd-networkd restart" - kubectl get pod -owide -A - if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then - echo "Check cilium identities in $(ciliumNamespace) namepsace during nightly run" - echo "expect the identities to be deleted when the namespace is deleted" - kubectl get ciliumidentity | grep cilium-test - fi - make test-validate-state - echo "delete cilium connectivity test resources and re-validate state" - kubectl delete ns $(ciliumNamespace) - kubectl get pod -owide -A - make test-validate-state - name: "validatePods" - displayName: "Validate Pods" + - ${{if eq( parameters['nightly'], true) }}: + - template: ../../templates/cilium-nightly-checks.yaml + - ${{else }}: + - script: | + set -e + echo "validate pod IP assignment and check systemd-networkd restart" + kubectl get pod -owide -A + ciliumNamespace=`kubectl get ns | grep cilium-test | awk '{print $1}'` - - script: | - if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then - kubectl get pod -owide -n $(ciliumNamespace) - echo "wait for pod and cilium identity deletion in $(ciliumNamespace) namespace" - while true; do - pods=$(kubectl get pods -n $(ciliumNamespace) --no-headers=true 2>/dev/null) - if [[ -z "$pods" ]]; then - echo "No pods found" - break - fi - sleep 2s + make test-validate-state + + echo "delete cilium connectivity test resources and re-validate state" + for namespace in ${ciliumNamespace}; do + kubectl delete ns ${namespace} done - sleep 20s - echo "Verify cilium identities are deleted from $(ciliumNamespace)" - checkIdentity="$(kubectl get ciliumidentity -o json | grep cilium-test | jq -e 'length == 0')" - if [[ -n $checkIdentity ]]; then - echo "##[error]Cilium Identities still present in $(ciliumNamespace) namespace" - exit 1 - else - printf -- "Identities deleted from $(ciliumNamespace) 
namespace\n" - fi - else - echo "skip cilium identities check for PR pipeline" - fi - name: "CiliumIdentities" - displayName: "Verify Cilium Identities Deletion" + kubectl get pod -owide -A + + make test-validate-state + name: "validatePods" + displayName: "Validate Pods" - script: | echo "Run wireserver and metadata connectivity Tests" @@ -222,18 +193,15 @@ steps: name: "WireserverMetadataConnectivityTests" displayName: "Run Wireserver and Metadata Connectivity Tests" - - script: | - if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then - echo "Running nightly, skip async delete test" - else + - ${{if eq( parameters['nightly'], false) }}: + - script: | cd hack/scripts chmod +x async-delete-test.sh ./async-delete-test.sh if ! [ -z $(kubectl -n kube-system get ds azure-cns | grep non-existing) ]; then kubectl -n kube-system patch daemonset azure-cns --type json -p='[{"op": "remove", "path": "/spec/template/spec/nodeSelector/non-existing"}]' fi - fi - name: "testAsyncDelete" - displayName: "Verify Async Delete when CNS is down" + name: "testAsyncDelete" + displayName: "Verify Async Delete when CNS is down" - template: ../../templates/cilium-mtu-check.yaml diff --git a/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e.steps.yaml b/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e.steps.yaml index a7bcdb4aec..5313896097 100644 --- a/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e.steps.yaml +++ b/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e.steps.yaml @@ -4,6 +4,7 @@ parameters: testHubble: false testLRP: false scaleup: "" + nightly: false steps: @@ -144,13 +145,6 @@ steps: - template: ../../templates/cilium-connectivity-tests.yaml - - script: | - ns=`kubectl get ns | grep cilium-test | awk '{print $1}'` - echo "##vso[task.setvariable variable=ciliumNamespace]$ns" - retryCountOnTaskFailure: 3 - name: "nsCapture" - displayName: "Capture Connectivity Test Namespace" - - ${{ if eq( parameters['testHubble'], true) }}: - 
script: | echo "enable Hubble metrics server" @@ -168,49 +162,26 @@ steps: name: "HubbleConnectivityTests" displayName: "Run Hubble Connectivity Tests" - - script: | - set -e - echo "validate pod IP assignment and check systemd-networkd restart" - kubectl get pod -owide -A - if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then - echo "Check cilium identities in $(ciliumNamespace) namepsace during nightly run" - echo "expect the identities to be deleted when the namespace is deleted" - kubectl get ciliumidentity | grep cilium-test - fi - make test-validate-state - echo "delete cilium connectivity test resources and re-validate state" - kubectl delete ns $(ciliumNamespace) - kubectl get pod -owide -A - make test-validate-state - name: "validatePods" - displayName: "Validate Pods" + - ${{if eq( parameters['nightly'], true) }}: + - template: ../../templates/cilium-nightly-checks.yaml + - ${{else }}: + - script: | + set -e + echo "validate pod IP assignment and check systemd-networkd restart" + kubectl get pod -owide -A + ciliumNamespace=`kubectl get ns | grep cilium-test | awk '{print $1}'` - - script: | - if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then - kubectl get pod -owide -n $(ciliumNamespace) - echo "wait for pod and cilium identity deletion in $(ciliumNamespace) namespace" - while true; do - pods=$(kubectl get pods -n $(ciliumNamespace) --no-headers=true 2>/dev/null) - if [[ -z "$pods" ]]; then - echo "No pods found" - break - fi - sleep 2s + make test-validate-state + + echo "delete cilium connectivity test resources and re-validate state" + for namespace in ${ciliumNamespace}; do + kubectl delete ns ${namespace} done - sleep 20s - echo "Verify cilium identities are deleted from $(ciliumNamespace)" - checkIdentity="$(kubectl get ciliumidentity -o json | grep cilium-test | jq -e 'length == 0')" - if [[ -n $checkIdentity ]]; then - echo "##[error]Cilium Identities still present in $(ciliumNamespace) namespace" - exit 1 - else - printf -- 
"Identities deleted from $(ciliumNamespace) namespace\n" - fi - else - echo "skip cilium identities check for PR pipeline" - fi - name: "CiliumIdentities" - displayName: "Verify Cilium Identities Deletion" + kubectl get pod -owide -A + + make test-validate-state + name: "validatePods" + displayName: "Validate Pods" - script: | # TODO REMOVE THIS STEP, make test-load covers this set -e @@ -233,19 +204,16 @@ steps: name: "WireserverMetadataConnectivityTests" displayName: "Run Wireserver and Metadata Connectivity Tests" - - script: | - if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then - echo "Running nightly, skip async delete test" - else + - ${{if eq( parameters['nightly'], false) }}: + - script: | cd hack/scripts chmod +x async-delete-test.sh ./async-delete-test.sh if ! [ -z $(kubectl -n kube-system get ds azure-cns | grep non-existing) ]; then kubectl -n kube-system patch daemonset azure-cns --type json -p='[{"op": "remove", "path": "/spec/template/spec/nodeSelector/non-existing"}]' fi - fi - name: "testAsyncDelete" - displayName: "Verify Async Delete when CNS is down" + name: "testAsyncDelete" + displayName: "Verify Async Delete when CNS is down" - template: ../../templates/cilium-mtu-check.yaml diff --git a/.pipelines/templates/cilium-nightly-checks.yaml b/.pipelines/templates/cilium-nightly-checks.yaml new file mode 100644 index 0000000000..a9852d42bd --- /dev/null +++ b/.pipelines/templates/cilium-nightly-checks.yaml @@ -0,0 +1,50 @@ +steps: + - script: | + set -e + echo "validate pod IP assignment and check systemd-networkd restart" + kubectl get pod -owide -A + + ciliumNamespace=`kubectl get ns | grep cilium-test | awk '{print $1}'` + echo "Check cilium identities in ${ciliumNamespace} namepsace during nightly run" + echo "expect the identities to be deleted when the namespace is deleted" + kubectl get ciliumidentity | grep cilium-test + + make test-validate-state + + echo "delete cilium connectivity test resources and re-validate state" + for 
namespace in ${ciliumNamespace}; do + kubectl delete ns ${namespace} + done + kubectl get pod -owide -A + + make test-validate-state + name: "validatePods" + displayName: "Validate Pods" + - script: | + ciliumNamespace=`kubectl get ns | grep cilium-test | awk '{print $1}'` + kubectl get pod -Aowide | grep cilium-test + + for namespace in ${ciliumNamespace}; do + echo "Checking namespace: $namespace" + echo "wait for pod and cilium identity deletion in ${namespace} namespace" + while true; do + pods=$(kubectl get pods -n ${namespace} --no-headers=true 2>/dev/null) + if [[ -z "$pods" ]]; then + echo "No pods found" + break + fi + sleep 2s + done + sleep 20s + echo "Verify cilium identities are deleted from ${namespace}" + checkIdentity="$(kubectl get ciliumidentity -o json | grep cilium-test | jq -e 'length == 0')" + if [[ -n $checkIdentity ]]; then + echo "##[error]Cilium Identities still present in ${namespace} namespace" + exit 1 + else + printf -- "Identities deleted from ${namespace} namespace\n" + fi + done + + name: "CiliumIdentities" + displayName: "Verify Cilium Identities Deletion" diff --git a/test/integration/manifests/cilium/cilium-nightly-config.yaml b/test/integration/manifests/cilium/cilium-nightly-config.yaml index 959d0057f1..07169420d3 100644 --- a/test/integration/manifests/cilium/cilium-nightly-config.yaml +++ b/test/integration/manifests/cilium/cilium-nightly-config.yaml @@ -71,7 +71,8 @@ data: tofqdns-min-ttl: "0" tofqdns-proxy-response-max-delay: 100ms routing-mode: native - unmanaged-pod-watcher-interval: "15" + # Updated to require time format in 1.19 + unmanaged-pod-watcher-interval: "15s" vtep-cidr: "" vtep-endpoint: "" vtep-mac: "" @@ -99,21 +100,21 @@ data: cni-log-file: "/var/run/cilium/cilium-cni.log" ipam-cilium-node-update-rate: "15s" egress-gateway-reconciliation-trigger-interval: "1s" - nat-map-stats-entries: "32" - nat-map-stats-interval: "30s" + nat-map-stats-entries: "32" + nat-map-stats-interval: "30s" bpf-events-drop-enabled: 
"true" # exposes drop events to cilium monitor/hubble bpf-events-policy-verdict-enabled: "true" # exposes policy verdict events to cilium monitor/hubble bpf-events-trace-enabled: "true" # exposes trace events to cilium monitor/hubble enable-tcx: "false" # attach endpoint programs with tcx if supported by kernel - datapath-mode: "veth" + datapath-mode: "veth" direct-routing-skip-unreachable: "false" enable-runtime-device-detection: "false" - bpf-lb-sock: "false" - bpf-lb-sock-terminate-pod-connections: "false" - nodeport-addresses: "" + bpf-lb-sock: "false" + bpf-lb-sock-terminate-pod-connections: "false" + nodeport-addresses: "" k8s-require-ipv4-pod-cidr: "false" k8s-require-ipv6-pod-cidr: "false" - enable-node-selector-labels: "false" + enable-node-selector-labels: "false" ## new values for 1.17 ces-slice-mode: "fcfs" enable-cilium-endpoint-slice: "true" diff --git a/test/integration/manifests/cilium/cilium-nightly-operator/role.yaml b/test/integration/manifests/cilium/cilium-nightly-operator/role.yaml new file mode 100644 index 0000000000..12c079d625 --- /dev/null +++ b/test/integration/manifests/cilium/cilium-nightly-operator/role.yaml @@ -0,0 +1,24 @@ + +### v1.19 Additions for ZTunnel management ### +### Required if zTunnel encryption is enabled or disabled +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cilium-operator-ztunnel + namespace: kube-system + labels: + app.kubernetes.io/part-of: cilium +rules: +# ZTunnel DaemonSet management permissions +# Note: These permissions must always be granted (not conditional on encryption.type) +# because the controller needs to clean up stale DaemonSets when ztunnel is disabled. 
+- apiGroups: + - apps + resources: + - daemonsets + verbs: + - create + - delete + - get + - list + - watch diff --git a/test/integration/manifests/cilium/cilium-nightly-operator/rolebinding.yaml b/test/integration/manifests/cilium/cilium-nightly-operator/rolebinding.yaml new file mode 100644 index 0000000000..c03e6e93cf --- /dev/null +++ b/test/integration/manifests/cilium/cilium-nightly-operator/rolebinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cilium-operator-ztunnel + namespace: kube-system + labels: + app.kubernetes.io/part-of: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-operator-ztunnel +subjects: +- kind: ServiceAccount + name: "cilium-operator" + namespace: kube-system From abaa01c4147d5ce60705d64f2a5a407194df63b8 Mon Sep 17 00:00:00 2001 From: Alexander <39818795+QxBytes@users.noreply.github.com> Date: Mon, 5 Jan 2026 12:44:35 -0800 Subject: [PATCH 43/47] ci: move cilium deploy logic to makefile (#4184) * initial makefile changes * initial yaml changes for nightly, build, and pr pipelines * fix path * update for release test pipeline * address comments --- .../cilium-overlay-load-test-template.yaml | 39 +++------- .pipelines/cni/cilium/cilium-scale-test.yaml | 30 ++------ ...um-dualstackoverlay-e2e-step-template.yaml | 18 ++--- .../cilium-dualstackoverlay-e2e.steps.yaml | 25 ++----- .../cilium-nodesubnet-e2e-step-template.yaml | 12 +-- .../cilium-nodesubnet-e2e.steps.yaml | 12 +-- .../cilium-overlay-e2e-step-template.yaml | 8 +- .../cilium-overlay-e2e.steps.yaml | 8 +- .../cilium-overlay-e2e-step-template.yaml | 22 +----- .../cilium-overlay-e2e.steps.yaml | 22 +----- .../cilium/cilium-e2e-step-template.yaml | 13 +--- .../cilium/cilium-e2e.steps.yaml | 13 +--- hack/aks/README.md | 8 ++ hack/aks/deploy.mk | 74 +++++++++++++++++-- 14 files changed, 128 insertions(+), 176 deletions(-) diff --git a/.pipelines/cni/cilium/cilium-overlay-load-test-template.yaml 
b/.pipelines/cni/cilium/cilium-overlay-load-test-template.yaml index c0789f590f..47211bff96 100644 --- a/.pipelines/cni/cilium/cilium-overlay-load-test-template.yaml +++ b/.pipelines/cni/cilium/cilium-overlay-load-test-template.yaml @@ -93,34 +93,19 @@ stages: if [ ! -z ${{ parameters.dualstackVersion }} ]; then echo "Use dualstack version of Cilium" export CILIUM_VERSION_TAG=${{ parameters.dualstackVersion }} - fi - - echo "install Cilium ${CILIUM_VERSION_TAG}" - export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2) - echo "installing files from ${DIR}" - - echo "deploy Cilium ConfigMap" - if [ ! -z ${{ parameters.dualstackVersion }} ]; then - echo "Use dualstack configmap for Cilium" - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-dualstack.yaml - else - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml - fi - - # Passes Cilium image to daemonset and deployment - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files - - if [ ! -z ${{ parameters.dualstackVersion }} ]; then - echo "Use dualstack daemonset for Cilium" + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2) export IPV6_IMAGE_REGISTRY=acnpublic.azurecr.io export IPV6_HP_BPF_VERSION=$(make ipv6-hp-bpf-version) - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY},${IPV6_IMAGE_REGISTRY},${IPV6_HP_BPF_VERSION}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset-dualstack.yaml | kubectl apply -f - + echo "IPV6_HP_BPF_VERSION: $IPV6_HP_BPF_VERSION" + echo "IPV6_IMAGE_REGISTRY: $IPV6_IMAGE_REGISTRY" + echo "installing cilium dualstack version ${CILIUM_VERSION_TAG} from directory ${DIR}" + make -C ./hack/aks deploy-cilium-dualstack else - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2) + echo "installing cilium version ${CILIUM_VERSION_TAG} from directory ${DIR}" + make -C ./hack/aks deploy-cilium fi - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - kubectl get po -owide -A - ${{if eq(parameters.hubbleEnabled, true)}}: @@ -146,12 +131,8 @@ stages: echo "install Cilium onto Overlay Cluster with hubble enabled" export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG} export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2) - echo "installing files from ${DIR}" - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files - envsubst '${CILIUM_IMAGE_REGISTRY},${CILIUM_VERSION_TAG}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - - envsubst '${CILIUM_IMAGE_REGISTRY},${CILIUM_VERSION_TAG}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + echo "installing cilium hubble version ${CILIUM_VERSION_TAG} from directory ${DIR}" + make -C ./hack/aks deploy-cilium-hubble kubectl get po -owide -A - job: deploy_cns_and_ipam diff --git a/.pipelines/cni/cilium/cilium-scale-test.yaml b/.pipelines/cni/cilium/cilium-scale-test.yaml index 523e71c2eb..a24a17432c 100644 --- a/.pipelines/cni/cilium/cilium-scale-test.yaml +++ b/.pipelines/cni/cilium/cilium-scale-test.yaml @@ -46,33 +46,19 @@ stages: echo "Redeploy all cilium components and update cilium version. Redeploy all to catch all changes between versions" pwd - echo "install Cilium ${CILIUM_VERSION_TAG}" export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2) - echo "installing files from ${DIR}" - - echo "deploy Cilium ConfigMap" if ${IS_DUALSTACK}; then - echo "Use dualstack configmap for Cilium" - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-dualstack.yaml - else - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml - fi - - # Passes Cilium image to daemonset and deployment - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files - - export CILIUM_VERSION_TAG=${CILIUM_VERSION_TAG} - export CILIUM_IMAGE_REGISTRY=${CILIUM_IMAGE_REGISTRY} - if ${IS_DUALSTACK}; then - echo "Use dualstack daemonset for Cilium" + echo "Use dualstack version of Cilium" export IPV6_IMAGE_REGISTRY=acnpublic.azurecr.io - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY},${IPV6_IMAGE_REGISTRY},${IPV6_HP_BPF_VERSION}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset-dualstack.yaml | kubectl apply -f - + export IPV6_HP_BPF_VERSION=${IPV6_HP_BPF_VERSION} + echo "IPV6_HP_BPF_VERSION: $IPV6_HP_BPF_VERSION" + echo "IPV6_IMAGE_REGISTRY: $IPV6_IMAGE_REGISTRY" + echo "installing cilium dualstack version ${CILIUM_VERSION_TAG} from directory ${DIR}" + make -C ./hack/aks deploy-cilium-dualstack else - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - + echo "installing cilium version ${CILIUM_VERSION_TAG} from directory ${DIR}" + make -C ./hack/aks deploy-cilium fi - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - kubectl get po -owide -A echo "Deploy Azure-CNS" diff --git a/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e-step-template.yaml 
b/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e-step-template.yaml index afa332f451..477a1b5b0c 100644 --- a/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e-step-template.yaml +++ b/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e-step-template.yaml @@ -33,20 +33,14 @@ steps: pwd kubectl cluster-info kubectl get po -owide -A - echo "install Cilium ${CILIUM_DUALSTACK_VERSION}" - export DIR=$(echo ${CILIUM_DUALSTACK_VERSION#v} | cut -d. -f1,2) - echo "installing files from ${DIR}" - echo "deploy Cilium ConfigMap" - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-dualstack.yaml - # Passes Cilium image to daemonset and deployment - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files - export CILIUM_VERSION_TAG=${CILIUM_DUALSTACK_VERSION} + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2) export IPV6_HP_BPF_VERSION=$(make ipv6-hp-bpf-version) - echo "install Cilium ${CILIUM_DUALSTACK_VERSION} onto Overlay Cluster" - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY},${IPV6_IMAGE_REGISTRY},${IPV6_HP_BPF_VERSION}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset-dualstack.yaml | kubectl apply -f - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + echo "IPV6_HP_BPF_VERSION: $IPV6_HP_BPF_VERSION" + echo "IPV6_IMAGE_REGISTRY: $IPV6_IMAGE_REGISTRY" + # IPV6_IMAGE_REGISTRY is already exported + echo "installing cilium dualstack version ${CILIUM_VERSION_TAG} from directory ${DIR}" + make -C ./hack/aks deploy-cilium-dualstack kubectl get po -owide -A name: "installCilium" displayName: "Install Cilium on AKS Dualstack Overlay" diff --git a/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e.steps.yaml b/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e.steps.yaml index ef356ba9b9..64e2746450 100644 --- a/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e.steps.yaml +++ b/.pipelines/singletenancy/cilium-dualstack-overlay/cilium-dualstackoverlay-e2e.steps.yaml @@ -33,25 +33,16 @@ steps: pwd kubectl cluster-info kubectl get po -owide -A - echo "install Cilium ${CILIUM_DUALSTACK_VERSION}" - export DIR=$(echo ${CILIUM_DUALSTACK_VERSION#v} | cut -d. 
-f1,2) - echo "installing files from ${DIR}" - echo "deploy Cilium ConfigMap" - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-dualstack.yaml - # Passes Cilium image to daemonset and deployment - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files - export CILIUM_VERSION_TAG=${CILIUM_DUALSTACK_VERSION} + export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2) echo "$CILIUM_VERSION_TAG" - [[ -z $IPV6_HP_BPF_VERSION ]] && IPV6_HP_BPF_VERSION=$(make ipv6-hp-bpf-version) - echo "$IPV6_HP_BPF_VERSION" - [[ -z $IPV6_IMAGE_REGISTRY ]] && IPV6_IMAGE_REGISTRY=acnpublic.azurecr.io - [[ -n $IPV6_HP_BPF_IMAGE_REPO_PATH ]] && IPV6_IMAGE_REGISTRY="$IPV6_IMAGE_REGISTRY"/"$IPV6_HP_BPF_IMAGE_REPO_PATH" - echo "$IPV6_HP_BPF_IMAGE_REGISTRY" - echo "install Cilium ${CILIUM_DUALSTACK_VERSION} onto Overlay Cluster" - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY},${IPV6_IMAGE_REGISTRY},${IPV6_HP_BPF_VERSION}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset-dualstack.yaml | kubectl apply -f - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + [[ -z $IPV6_HP_BPF_VERSION ]] && export IPV6_HP_BPF_VERSION=$(make ipv6-hp-bpf-version) + echo "IPV6_HP_BPF_VERSION: $IPV6_HP_BPF_VERSION" + [[ -z $IPV6_IMAGE_REGISTRY ]] && export IPV6_IMAGE_REGISTRY=acnpublic.azurecr.io + [[ -n $IPV6_HP_BPF_IMAGE_REPO_PATH ]] && export IPV6_IMAGE_REGISTRY="$IPV6_IMAGE_REGISTRY"/"$IPV6_HP_BPF_IMAGE_REPO_PATH" + echo "IPV6_IMAGE_REGISTRY: $IPV6_IMAGE_REGISTRY" + echo "installing cilium dualstack version ${CILIUM_VERSION_TAG} from directory ${DIR}" + make -C ./hack/aks deploy-cilium-dualstack kubectl get po -owide -A name: "installCilium" displayName: "Install Cilium on AKS Dualstack Overlay" diff --git 
a/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e-step-template.yaml b/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e-step-template.yaml index f3b488ade5..da89403c71 100644 --- a/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e-step-template.yaml +++ b/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e-step-template.yaml @@ -57,17 +57,9 @@ steps: pwd kubectl cluster-info kubectl get po -owide -A - echo "install Cilium ${CILIUM_VERSION_TAG}" export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2) - echo "installing files from ${DIR}" - echo "deploy Cilium ConfigMap" - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml - # Passes Cilium image to daemonset and deployment - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + echo "installing cilium version ${CILIUM_VERSION_TAG} from directory ${DIR}" + make -C ./hack/aks deploy-cilium kubectl get po -owide -A kubectl get crd -A name: "installCilium" diff --git a/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e.steps.yaml b/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e.steps.yaml index a41c7f3353..58cb525dc1 100644 --- a/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e.steps.yaml +++ b/.pipelines/singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e.steps.yaml @@ -51,17 +51,9 @@ steps: pwd kubectl cluster-info kubectl get po -owide -A - echo "install Cilium ${CILIUM_VERSION_TAG}" export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut 
-d. -f1,2) - echo "installing files from ${DIR}" - echo "deploy Cilium ConfigMap" - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml - # Passes Cilium image to daemonset and deployment - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + echo "installing cilium version ${CILIUM_VERSION_TAG} from directory ${DIR}" + make -C ./hack/aks deploy-cilium kubectl get po -owide -A kubectl get crd -A name: "installCilium" diff --git a/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e-step-template.yaml b/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e-step-template.yaml index 2e5b6e911e..542cc58012 100644 --- a/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e-step-template.yaml +++ b/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e-step-template.yaml @@ -40,12 +40,8 @@ steps: ls -lah export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG} export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2) - echo "installing files from ${DIR}" - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files - envsubst '${CILIUM_IMAGE_REGISTRY},${CILIUM_VERSION_TAG}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - - envsubst '${CILIUM_IMAGE_REGISTRY},${CILIUM_VERSION_TAG}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + echo "installing cilium hubble version ${CILIUM_VERSION_TAG} from directory ${DIR}" + make -C ./hack/aks deploy-cilium-hubble # Use different file directories for nightly and current cilium version name: "installCilium" displayName: "Install Cilium on AKS Overlay" diff --git a/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.steps.yaml b/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.steps.yaml index f6afd59698..a4aa3f3e56 100644 --- a/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.steps.yaml +++ b/.pipelines/singletenancy/cilium-overlay-withhubble/cilium-overlay-e2e.steps.yaml @@ -34,12 +34,8 @@ steps: ls -lah export CILIUM_VERSION_TAG=${CILIUM_HUBBLE_VERSION_TAG} export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2) - echo "installing files from ${DIR}" - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config-hubble.yaml - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files - envsubst '${CILIUM_IMAGE_REGISTRY},${CILIUM_VERSION_TAG}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - - envsubst '${CILIUM_IMAGE_REGISTRY},${CILIUM_VERSION_TAG}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + echo "installing cilium hubble version ${CILIUM_VERSION_TAG} from directory ${DIR}" + make -C ./hack/aks deploy-cilium-hubble # Use different file directories for nightly and current cilium version name: "installCilium" displayName: "Install Cilium on AKS Overlay" diff --git a/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e-step-template.yaml b/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e-step-template.yaml index 2456b17192..6393bdba53 100644 --- a/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e-step-template.yaml +++ b/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e-step-template.yaml @@ -44,28 +44,12 @@ steps: kubectl cluster-info kubectl get po -owide -A if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then - FILE_PATH=-nightly echo "Running nightly" - echo "deploy Cilium ConfigMap" - kubectl apply -f test/integration/manifests/cilium/cilium${FILE_PATH}-config.yaml - # Passes Cilium image to daemonset and deployment - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/daemonset.yaml | kubectl apply -f - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/deployment.yaml | kubectl apply -f - - # Use different file directories for nightly and current cilium version - kubectl apply -f 
test/integration/manifests/cilium/cilium${FILE_PATH}-agent - kubectl apply -f test/integration/manifests/cilium/cilium${FILE_PATH}-operator + make -C ./hack/aks deploy-cilium-nightly else - echo "install Cilium ${CILIUM_VERSION_TAG}" export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2) - echo "installing files from ${DIR}" - echo "deploy Cilium ConfigMap" - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml - # Passes Cilium image to daemonset and deployment - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + echo "installing cilium version ${CILIUM_VERSION_TAG} from directory ${DIR}" + make -C ./hack/aks deploy-cilium fi kubectl get po -owide -A diff --git a/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e.steps.yaml b/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e.steps.yaml index 5313896097..09352320b2 100644 --- a/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e.steps.yaml +++ b/.pipelines/singletenancy/cilium-overlay/cilium-overlay-e2e.steps.yaml @@ -38,28 +38,12 @@ steps: kubectl cluster-info kubectl get po -owide -A if [ "$CILIUM_VERSION_TAG" = "cilium-nightly-pipeline" ]; then - FILE_PATH=-nightly echo "Running nightly" - echo "deploy Cilium ConfigMap" - kubectl apply -f test/integration/manifests/cilium/cilium${FILE_PATH}-config.yaml - # Passes Cilium image to daemonset and deployment - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/daemonset.yaml | kubectl apply -f - - envsubst 
'${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/deployment.yaml | kubectl apply -f - - # Use different file directories for nightly and current cilium version - kubectl apply -f test/integration/manifests/cilium/cilium${FILE_PATH}-agent - kubectl apply -f test/integration/manifests/cilium/cilium${FILE_PATH}-operator + make -C ./hack/aks deploy-cilium-nightly else - echo "install Cilium ${CILIUM_VERSION_TAG}" export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2) - echo "installing files from ${DIR}" - echo "deploy Cilium ConfigMap" - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml - # Passes Cilium image to daemonset and deployment - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + echo "installing cilium version ${CILIUM_VERSION_TAG} from directory ${DIR}" + make -C ./hack/aks deploy-cilium fi kubectl get po -owide -A diff --git a/.pipelines/singletenancy/cilium/cilium-e2e-step-template.yaml b/.pipelines/singletenancy/cilium/cilium-e2e-step-template.yaml index 5284285541..302637b7cd 100644 --- a/.pipelines/singletenancy/cilium/cilium-e2e-step-template.yaml +++ b/.pipelines/singletenancy/cilium/cilium-e2e-step-template.yaml @@ -39,17 +39,10 @@ steps: pwd kubectl cluster-info kubectl get po -owide -A - echo "install Cilium ${CILIUM_VERSION_TAG}" export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2) - echo "installing files from ${DIR}" - echo "deploy Cilium ConfigMap" - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml - # Passes Cilium image to daemonset and deployment - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + # cilium version tag is already exported + echo "installing cilium version ${CILIUM_VERSION_TAG} from directory ${DIR}" + make -C ./hack/aks deploy-cilium kubectl get po -owide -A name: "installCilium" displayName: "Install Cilium" diff --git a/.pipelines/singletenancy/cilium/cilium-e2e.steps.yaml b/.pipelines/singletenancy/cilium/cilium-e2e.steps.yaml index eb94724ef1..1a6c429844 100644 --- a/.pipelines/singletenancy/cilium/cilium-e2e.steps.yaml +++ b/.pipelines/singletenancy/cilium/cilium-e2e.steps.yaml @@ -33,17 +33,10 @@ steps: pwd kubectl cluster-info kubectl get po -owide -A - echo "install Cilium ${CILIUM_VERSION_TAG}" export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. 
-f1,2) - echo "installing files from ${DIR}" - echo "deploy Cilium ConfigMap" - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml - # Passes Cilium image to daemonset and deployment - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-agent/files - kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-operator/files - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - - envsubst '${CILIUM_VERSION_TAG},${CILIUM_IMAGE_REGISTRY}' < test/integration/manifests/cilium/v${DIR}/cilium-operator/templates/deployment.yaml | kubectl apply -f - + # cilium version tag is already exported + echo "installing cilium version ${CILIUM_VERSION_TAG} from directory ${DIR}" + make -C ./hack/aks deploy-cilium kubectl get po -owide -A name: "installCilium" displayName: "Install Cilium" diff --git a/hack/aks/README.md b/hack/aks/README.md index 7e3e89ffa8..d1fe00fc53 100644 --- a/hack/aks/README.md +++ b/hack/aks/README.md @@ -45,4 +45,12 @@ AKS Clusters windows-nodepool-up Add windows node pool down Delete the cluster vmss-restart Restart the nodes of the cluster + +Deploy (onto a byocni cluster without kube-proxy) + deploy-cilium Deploy standard Cilium with agent, operator, and config + deploy-cilium-hubble Deploy Cilium with Hubble observability enabled + deploy-cilium-nightly Deploy Cilium using nightly builds + deploy-cilium-dualstack Deploy Cilium with dual-stack IPv4/IPv6 support + deploy-ebpf-overlay-cilium Deploy Cilium with eBPF dataplane on overlay networks + deploy-ebpf-podsubnet-cilium Deploy Cilium with eBPF dataplane on podsubnet networks ``` diff --git a/hack/aks/deploy.mk b/hack/aks/deploy.mk index 3b02fad9ee..58d1aa769b 100644 --- a/hack/aks/deploy.mk +++ b/hack/aks/deploy.mk @@ -1,19 +1,85 @@ +# general cilium variables +DIR ?= 1.17 +CILIUM_VERSION_TAG ?= v1.17.7-250927 +CILIUM_IMAGE_REGISTRY 
?= mcr.microsoft.com/containernetworking +IPV6_IMAGE_REGISTRY ?= mcr.microsoft.com/containernetworking +IPV6_HP_BPF_VERSION ?= v0.0.1 + +# ebpf cilium variables EBPF_CILIUM_DIR ?= 1.17 # we don't use CILIUM_VERSION_TAG or CILIUM_IMAGE_REGISTRY because we want to use the version supported by ebpf -EBPF_CILIUM_VERSION_TAG ?= v1.17.7-250927 EBPF_CILIUM_IMAGE_REGISTRY ?= mcr.microsoft.com/containernetworking -IPV6_HP_BPF_VERSION ?= v0.0.1 +EBPF_CILIUM_VERSION_TAG ?= v1.17.7-250927 AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY ?= mcr.microsoft.com/containernetworking AZURE_IPTABLES_MONITOR_TAG ?= v0.0.3 AZURE_IP_MASQ_MERGER_IMAGE_REGISTRY ?= mcr.microsoft.com/containernetworking AZURE_IP_MASQ_MERGER_TAG ?= v0.0.1-0 + # so we can use in envsubst +export IPV6_IMAGE_REGISTRY export IPV6_HP_BPF_VERSION +export CILIUM_VERSION_TAG +export CILIUM_IMAGE_REGISTRY + +# ebpf export AZURE_IPTABLES_MONITOR_IMAGE_REGISTRY export AZURE_IPTABLES_MONITOR_TAG export AZURE_IP_MASQ_MERGER_IMAGE_REGISTRY export AZURE_IP_MASQ_MERGER_TAG +wait-for-cilium: + @kubectl rollout status deployment/cilium-operator -n kube-system --timeout=1200s + @kubectl rollout status daemonset/cilium -n kube-system --timeout=1200s + +# vanilla cilium deployment +deploy-cilium-config: + kubectl apply -f ../../test/integration/manifests/cilium/v$(DIR)/cilium-config/cilium-config.yaml + +deploy-cilium-agent: + kubectl apply -f ../../test/integration/manifests/cilium/v$(DIR)/cilium-agent/files + envsubst '$${CILIUM_VERSION_TAG},$${CILIUM_IMAGE_REGISTRY}' < ../../test/integration/manifests/cilium/v$(DIR)/cilium-agent/templates/daemonset.yaml | kubectl apply -f - + +deploy-cilium-operator: + kubectl apply -f ../../test/integration/manifests/cilium/v$(DIR)/cilium-operator/files + envsubst '$${CILIUM_VERSION_TAG},$${CILIUM_IMAGE_REGISTRY}' < ../../test/integration/manifests/cilium/v$(DIR)/cilium-operator/templates/deployment.yaml | kubectl apply -f - + +deploy-cilium: deploy-cilium-config deploy-cilium-agent 
deploy-cilium-operator wait-for-cilium + +# cilium with hubble deployment +deploy-cilium-config-hubble: + kubectl apply -f ../../test/integration/manifests/cilium/v$(DIR)/cilium-config/cilium-config-hubble.yaml + +deploy-cilium-hubble: deploy-cilium-config-hubble deploy-cilium-agent deploy-cilium-operator wait-for-cilium + +# cilium nightly deployment +deploy-cilium-config-nightly: + kubectl apply -f ../../test/integration/manifests/cilium/cilium-nightly-config.yaml + +deploy-cilium-agent-nightly: + envsubst '$${CILIUM_VERSION_TAG},$${CILIUM_IMAGE_REGISTRY}' < ../../test/integration/manifests/cilium/daemonset.yaml | kubectl apply -f - + kubectl apply -f ../../test/integration/manifests/cilium/cilium-nightly-agent + +deploy-cilium-operator-nightly: + envsubst '$${CILIUM_VERSION_TAG},$${CILIUM_IMAGE_REGISTRY}' < ../../test/integration/manifests/cilium/deployment.yaml | kubectl apply -f - + kubectl apply -f ../../test/integration/manifests/cilium/cilium-nightly-operator + +deploy-cilium-nightly: deploy-cilium-config-nightly deploy-cilium-agent-nightly deploy-cilium-operator-nightly wait-for-cilium + +# cilium dualstack deployment +deploy-cilium-config-dualstack: + kubectl apply -f ../../test/integration/manifests/cilium/v$(DIR)/cilium-config/cilium-config-dualstack.yaml + +deploy-cilium-agent-dualstack: + kubectl apply -f ../../test/integration/manifests/cilium/v$(DIR)/cilium-agent/files + envsubst '$${CILIUM_VERSION_TAG},$${CILIUM_IMAGE_REGISTRY},$${IPV6_IMAGE_REGISTRY},$${IPV6_HP_BPF_VERSION}' < ../../test/integration/manifests/cilium/v$(DIR)/cilium-agent/templates/daemonset-dualstack.yaml | kubectl apply -f - + +deploy-cilium-operator-dualstack: + kubectl apply -f ../../test/integration/manifests/cilium/v$(DIR)/cilium-operator/files + envsubst '$${CILIUM_VERSION_TAG},$${CILIUM_IMAGE_REGISTRY}' < ../../test/integration/manifests/cilium/v$(DIR)/cilium-operator/templates/deployment.yaml | kubectl apply -f - + +deploy-cilium-dualstack: deploy-cilium-config-dualstack 
deploy-cilium-agent-dualstack deploy-cilium-operator-dualstack wait-for-cilium + +# ebpf deploy-common-ebpf-cilium: @kubectl apply -f ../../test/integration/manifests/cilium/v$(EBPF_CILIUM_DIR)/cilium-agent/files/ @kubectl apply -f ../../test/integration/manifests/cilium/v$(EBPF_CILIUM_DIR)/cilium-operator/files/ @@ -43,7 +109,3 @@ deploy-ebpf-podsubnet-cilium: deploy-common-ebpf-cilium | kubectl apply -f - @$(MAKE) wait-for-cilium -wait-for-cilium: - @kubectl rollout status deployment/cilium-operator -n kube-system --timeout=1200s - @kubectl rollout status daemonset/cilium -n kube-system --timeout=1200s - From ef6db44cdd851227fb9c3f84187d16ce6aa5441e Mon Sep 17 00:00:00 2001 From: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> Date: Tue, 6 Jan 2026 14:22:43 -0800 Subject: [PATCH 44/47] =?UTF-8?q?Deploy=20Linux=20BYON=20nodes=20and=20ena?= =?UTF-8?q?ble=20datapath=20tests=20on=20long=20running=20clu=E2=80=A6=20(?= =?UTF-8?q?#4187)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Deploy Linux BYON nodes and enable datapath tests on long running cluster * Update .pipelines/swiftv2-long-running/scripts/deploy_linuxbyon.sh Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> * Fix indentation as per copilot suggestion. 
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> * Update .pipelines/swiftv2-long-running/scripts/deploy_linuxbyon.sh Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> --------- Signed-off-by: sivakami-projects <126191544+sivakami-projects@users.noreply.github.com> Co-authored-by: sivakami Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../scripts/deploy_linuxbyon.sh | 222 ++++++++++++++ .../template/datapath-tests-stage.yaml | 270 +++++++++++++++++ .../long-running-pipeline-template.yaml | 286 +++--------------- .../swiftv2/longRunningCluster/datapath.go | 4 +- 4 files changed, 542 insertions(+), 240 deletions(-) create mode 100644 .pipelines/swiftv2-long-running/scripts/deploy_linuxbyon.sh create mode 100644 .pipelines/swiftv2-long-running/template/datapath-tests-stage.yaml diff --git a/.pipelines/swiftv2-long-running/scripts/deploy_linuxbyon.sh b/.pipelines/swiftv2-long-running/scripts/deploy_linuxbyon.sh new file mode 100644 index 0000000000..83b89aa691 --- /dev/null +++ b/.pipelines/swiftv2-long-running/scripts/deploy_linuxbyon.sh @@ -0,0 +1,222 @@ +#!/bin/bash +set -e + +RESOURCE_GROUP=$1 +BUILD_SOURCE_DIR=$2 +BICEP_TEMPLATE_PATH="${BUILD_SOURCE_DIR}/Networking-Aquarius/.pipelines/singularity-runner/byon/linux.bicep" + +upload_kubeconfig() { + local cluster_name=$1 + local kubeconfig_file="./kubeconfig-${cluster_name}" + local secret_name="${RESOURCE_GROUP}-${cluster_name}-kubeconfig" + + echo "Fetching AKS credentials for cluster: ${cluster_name}" + az aks get-credentials \ + --resource-group "$RESOURCE_GROUP" \ + --name "$cluster_name" \ + --file "$kubeconfig_file" \ + --overwrite-existing + + echo "Storing kubeconfig for ${cluster_name} in Azure Key Vault..." 
+ if [[ -f "$kubeconfig_file" ]]; then + az keyvault secret set \ + --vault-name "$CLUSTER_KUBECONFIG_KEYVAULT_NAME" \ + --name "$secret_name" \ + --value "$(cat "$kubeconfig_file")" \ + --subscription "$KEY_VAULT_SUBSCRIPTION" \ + >> /dev/null + + if [[ $? -eq 0 ]]; then + echo "Successfully stored kubeconfig in Key Vault secret: $secret_name" + else + echo "##vso[task.logissue type=error]Failed to store kubeconfig for ${cluster_name} in Key Vault" + exit 1 + fi + else + echo "##vso[task.logissue type=error]Kubeconfig file not found at: $kubeconfig_file" + exit 1 + fi +} + +create_and_check_vmss() { + local cluster_name=$1 + local node_type=$2 + local vmss_sku=$3 + local nic_count=$4 + local node_name="${cluster_name}-${node_type}" + local log_file="./lin-script-${node_name}.log" + local extension_name="NodeJoin-${node_name}" + local kubeconfig_secret="${RESOURCE_GROUP}-${cluster_name}-kubeconfig" + + echo "Creating Linux VMSS Node '${node_name}' for cluster '${cluster_name}'" + set +e + az deployment group create -n "sat${node_name}" \ + --resource-group "$RESOURCE_GROUP" \ + --template-file "$BICEP_TEMPLATE_PATH" \ + --parameters vnetname="$cluster_name" \ + subnetname="nodenet" \ + name="$node_name" \ + sshPublicKey="$ssh_public_key" \ + vnetrgname="$RESOURCE_GROUP" \ + extensionName="$extension_name" \ + clusterKubeconfigKeyvaultName="$CLUSTER_KUBECONFIG_KEYVAULT_NAME" \ + clusterKubeconfigSecretName="$kubeconfig_secret" \ + keyVaultSubscription="$KEY_VAULT_SUBSCRIPTION" \ + vmsssku="$vmss_sku" \ + vmsscount=2 \ + delegatedNicsCount="$nic_count" \ + 2>&1 | tee "$log_file" + local deployment_exit_code=$? 
+ set -e + + if [[ $deployment_exit_code -ne 0 ]]; then + echo "##vso[task.logissue type=error]Azure deployment failed for VMSS '$node_name' with exit code $deployment_exit_code" + exit 1 + fi + + echo "Checking status for VMSS '${node_name}'" + local node_exists + node_exists=$(az vmss show --resource-group "$RESOURCE_GROUP" --name "$node_name" --query "name" -o tsv 2>/dev/null) + if [[ -z "$node_exists" ]]; then + echo "##vso[task.logissue type=error]VMSS '$node_name' does not exist." + exit 1 + else + echo "Successfully created VMSS: $node_name" + fi +} + +wait_for_nodes_ready() { + local cluster_name=$1 + local node_name=$2 + local kubeconfig_file="./kubeconfig-${cluster_name}" + + echo "Waiting for nodes from VMSS '${node_name}' to join cluster and become ready..." + local expected_nodes=2 + + # Check if BYO nodes have joined cluster using VMSS name label + for ((retry=1; retry<=15; retry++)); do + nodes=($(kubectl --kubeconfig "$kubeconfig_file" get nodes -o jsonpath='{.items[*].metadata.name}' | tr ' ' '\n' | grep "^${node_name}" || true)) + echo "Found ${#nodes[@]} nodes: ${nodes[*]}" + + if [ ${#nodes[@]} -ge $expected_nodes ]; then + echo "Found ${#nodes[@]} nodes from VMSS ${node_name}: ${nodes[*]}" + break + else + if [ $retry -eq 15 ]; then + echo "##vso[task.logissue type=error]Timeout waiting for nodes from VMSS ${node_name} to join the cluster" + kubectl --kubeconfig "$kubeconfig_file" get nodes -o wide || true + exit 1 + fi + echo "Retry $retry: Waiting for nodes to join... (${#nodes[@]}/$expected_nodes joined)" + sleep 30 + fi + done + + echo "Checking if nodes are ready..." 
+ for ((ready_retry=1; ready_retry<=7; ready_retry++)); do + echo "Ready check attempt $ready_retry of 7" + all_ready=true + + for nodename in "${nodes[@]}"; do + ready=$(kubectl --kubeconfig "./kubeconfig-${cluster_name}" get node "$nodename" -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null || echo "False") + if [ "$ready" != "True" ]; then + echo "Node $nodename is not ready yet (status: $ready)" + all_ready=false + else + echo "Node $nodename is ready" + fi + done + + if [ "$all_ready" = true ]; then + echo "All nodes from VMSS ${node_name} are ready" + break + else + if [ $ready_retry -eq 7 ]; then + echo "##vso[task.logissue type=error]Timeout: Nodes from VMSS ${node_name} are not ready after 7 attempts" + kubectl --kubeconfig "$kubeconfig_file" get nodes -o wide || true + exit 1 + fi + echo "Waiting 30 seconds before retry..." + sleep 30 + fi + done +} + +label_vmss_nodes() { + local cluster_name=$1 + local kubeconfig_file="./kubeconfig-${cluster_name}" + + echo "Labeling BYON nodes in ${cluster_name} with workload-type=swiftv2-linux-byon" + kubectl --kubeconfig "$kubeconfig_file" label nodes -l kubernetes.azure.com/managed=false workload-type=swiftv2-linux-byon --overwrite + + echo "Labeling ${cluster_name}-linux-default nodes with nic-capacity=low-nic" + kubectl --kubeconfig "$kubeconfig_file" get nodes -o name | grep "${cluster_name}-linux-default" | xargs -I {} kubectl --kubeconfig "$kubeconfig_file" label {} nic-capacity=low-nic --overwrite || true + + echo "Labeling ${cluster_name}-linux-highnic nodes with nic-capacity=high-nic" + kubectl --kubeconfig "$kubeconfig_file" get nodes -o name | grep "${cluster_name}-linux-highnic" | xargs -I {} kubectl --kubeconfig "$kubeconfig_file" label {} nic-capacity=high-nic --overwrite || true + + SOURCE_NODE=$(kubectl --kubeconfig "$kubeconfig_file" get nodes --selector='!kubernetes.azure.com/managed' -o jsonpath='{.items[0].metadata.name}') + + if [ -z "$SOURCE_NODE" ]; then + echo "Error: 
No BYON nodes found to use as source for label copying" + exit 1 + fi + + echo "Using node $SOURCE_NODE as source for label copying" + + LABEL_KEYS=( + "kubernetes\.azure\.com\/podnetwork-type" + "kubernetes\.azure\.com\/podnetwork-subscription" + "kubernetes\.azure\.com\/podnetwork-resourcegroup" + "kubernetes\.azure\.com\/podnetwork-name" + "kubernetes\.azure\.com\/podnetwork-subnet" + "kubernetes\.azure\.com\/podnetwork-multi-tenancy-enabled" + "kubernetes\.azure\.com\/podnetwork-delegationguid" + "kubernetes\.azure\.com\/cluster") + + nodes=($(kubectl --kubeconfig "$kubeconfig_file" get nodes -l kubernetes.azure.com/managed=false -o jsonpath='{.items[*].metadata.name}')) + + for NODENAME in "${nodes[@]}"; do + for label_key in "${LABEL_KEYS[@]}"; do + v=$(kubectl --kubeconfig "$kubeconfig_file" get nodes "$SOURCE_NODE" -o jsonpath="{.metadata.labels['$label_key']}") + l=$(echo "$label_key" | sed 's/\\//g') + echo "Labeling node $NODENAME with $l=$v" + kubectl --kubeconfig "$kubeconfig_file" label node "$NODENAME" "$l=$v" --overwrite + done + done +} + +echo "Fetching SSH public key from Key Vault..." +ssh_public_key=$(az keyvault secret show \ + --name "$SSH_PUBLIC_KEY_SECRET_NAME" \ + --vault-name "$CLUSTER_KUBECONFIG_KEYVAULT_NAME" \ + --subscription "$KEY_VAULT_SUBSCRIPTION" \ + --query value -o tsv 2>/dev/null || echo "") + +if [[ -z "$ssh_public_key" ]]; then + echo "##vso[task.logissue type=error]Failed to retrieve SSH public key from Key Vault" + exit 1 +fi + +cluster_names="aks-1 aks-2" +for cluster_name in $cluster_names; do + upload_kubeconfig "$cluster_name" + + echo "Installing CNI plugins for cluster $cluster_name" + if ! 
helm install -n kube-system azure-cni-plugins ${BUILD_SOURCE_DIR}/Networking-Aquarius/.pipelines/singularity-runner/byon/chart/base \ + --set installCniPlugins.enabled=true \ + --kubeconfig "./kubeconfig-${cluster_name}"; then + echo "##vso[task.logissue type=error]Failed to install CNI plugins for cluster ${cluster_name}" + exit 1 + fi + echo "Creating VMSS nodes for cluster $cluster_name..." + create_and_check_vmss "$cluster_name" "linux-highnic" "Standard_D16s_v3" "7" + wait_for_nodes_ready "$cluster_name" "$cluster_name-linux-highnic" + + create_and_check_vmss "$cluster_name" "linux-default" "Standard_D8s_v3" "2" + wait_for_nodes_ready "$cluster_name" "$cluster_name-linux-default" + + label_vmss_nodes "$cluster_name" +done + +echo "VMSS deployment completed successfully for both clusters." diff --git a/.pipelines/swiftv2-long-running/template/datapath-tests-stage.yaml b/.pipelines/swiftv2-long-running/template/datapath-tests-stage.yaml new file mode 100644 index 0000000000..361908a0e5 --- /dev/null +++ b/.pipelines/swiftv2-long-running/template/datapath-tests-stage.yaml @@ -0,0 +1,270 @@ +parameters: + - name: workloadType + type: string + - name: subscriptionId + type: string + - name: rgName + type: string + - name: storageAccount1 + type: string + default: '' + - name: storageAccount2 + type: string + default: '' + - name: runSetupStages + type: boolean + default: false + - name: dependsOn + type: object + default: [] + +stages: + - stage: DataPathTests_${{ replace(parameters.workloadType, '-', '_') }} + displayName: "Stage: Swiftv2 Data Path Tests - ${{ parameters.workloadType }}" + ${{ if ne(length(parameters.dependsOn), 0) }}: + dependsOn: ${{ parameters.dependsOn }} + ${{ else }}: + dependsOn: AKSClusterAndNetworking + condition: or(eq(${{ parameters.runSetupStages }}, false), succeeded()) + variables: + storageAccount1: ${{ parameters.storageAccount1 }} + storageAccount2: ${{ parameters.storageAccount2 }} + jobs: + - job: SetupKubeconfig + displayName: 
"Setup Kubeconfig Files" + steps: + - task: AzureCLI@2 + displayName: "Generate and verify kubeconfig files" + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + mkdir -p $(Pipeline.Workspace)/kubeconfigs + + echo "==> Setting up kubeconfig for cluster aks-1" + az aks get-credentials \ + --resource-group ${{ parameters.rgName }} \ + --name aks-1 \ + --file $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig \ + --overwrite-existing \ + --admin + + echo "==> Setting up kubeconfig for cluster aks-2" + az aks get-credentials \ + --resource-group ${{ parameters.rgName }} \ + --name aks-2 \ + --file $(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig \ + --overwrite-existing \ + --admin + + echo "==> Verifying cluster aks-1 connectivity" + kubectl --kubeconfig $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig get nodes + + echo "==> Verifying cluster aks-2 connectivity" + kubectl --kubeconfig $(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig get nodes + + - task: PublishPipelineArtifact@1 + displayName: "Publish kubeconfig files" + inputs: + targetPath: $(Pipeline.Workspace)/kubeconfigs + artifactName: kubeconfigs-${{ parameters.workloadType }} + publishLocation: pipeline + + - job: CreatePods + displayName: "Create Swiftv2 Pods" + dependsOn: SetupKubeconfig + steps: + - task: DownloadPipelineArtifact@2 + displayName: "Download kubeconfig files" + inputs: + artifactName: kubeconfigs-${{ parameters.workloadType }} + targetPath: $(Pipeline.Workspace)/kubeconfigs + + - task: AzureCLI@2 + displayName: "Create pods" + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + echo "==> Creating pods (8 scenarios) for workload type: ${{ parameters.workloadType }}" + export RG="${{ parameters.rgName }}" + export BUILD_ID="${{ parameters.rgName }}" + export WORKLOAD_TYPE="${{ parameters.workloadType }}" + cd 
./test/integration/swiftv2/longRunningCluster + go test -v -timeout=1h -tags=create_test + workingDirectory: $(System.DefaultWorkingDirectory) + env: + KUBECONFIG: $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig:$(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig + KUBECONFIG_DIR: $(Pipeline.Workspace)/kubeconfigs + + - job: ConnectivityTests + displayName: "Test Pod-to-Pod Connectivity" + dependsOn: CreatePods + steps: + - task: DownloadPipelineArtifact@2 + displayName: "Download kubeconfig files" + inputs: + artifactName: kubeconfigs-${{ parameters.workloadType }} + targetPath: $(Pipeline.Workspace)/kubeconfigs + + - task: AzureCLI@2 + displayName: "Run Connectivity Tests" + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + echo "==> Running connectivity tests for workload type: ${{ parameters.workloadType }}" + export RG="${{ parameters.rgName }}" + export BUILD_ID="${{ parameters.rgName }}" + export WORKLOAD_TYPE="${{ parameters.workloadType }}" + cd ./test/integration/swiftv2/longRunningCluster + go test -v -timeout=30m -tags=connectivity_test + workingDirectory: $(System.DefaultWorkingDirectory) + env: + KUBECONFIG: $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig:$(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig + KUBECONFIG_DIR: $(Pipeline.Workspace)/kubeconfigs + + - job: PrivateEndpointTests + displayName: "Test Private Endpoint Access" + dependsOn: ConnectivityTests + steps: + - task: DownloadPipelineArtifact@2 + displayName: "Download kubeconfig files" + inputs: + artifactName: kubeconfigs-${{ parameters.workloadType }} + targetPath: $(Pipeline.Workspace)/kubeconfigs + + - task: AzureCLI@2 + name: DiscoverStorageAccounts + displayName: "Discover storage accounts" + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + STORAGE_ACCOUNT_1="${{ parameters.storageAccount1 }}" + STORAGE_ACCOUNT_2="${{ 
parameters.storageAccount2 }}" + + if [ -z "$STORAGE_ACCOUNT_1" ] || [ -z "$STORAGE_ACCOUNT_2" ]; then + echo "Storage account variables not set, discovering from resource group..." + STORAGE_ACCOUNT_1=$(az storage account list -g ${{ parameters.rgName }} --query "[?starts_with(name, 'sa1')].name" -o tsv) + STORAGE_ACCOUNT_2=$(az storage account list -g ${{ parameters.rgName }} --query "[?starts_with(name, 'sa2')].name" -o tsv) + echo "Discovered: STORAGE_ACCOUNT_1=$STORAGE_ACCOUNT_1, STORAGE_ACCOUNT_2=$STORAGE_ACCOUNT_2" + fi + + echo "##vso[task.setvariable variable=storageAccount1;isOutput=true]$STORAGE_ACCOUNT_1" + echo "##vso[task.setvariable variable=storageAccount2;isOutput=true]$STORAGE_ACCOUNT_2" + + - task: AzureCLI@2 + displayName: "Assign RBAC for SAS token generation" + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: scriptPath + scriptPath: ".pipelines/swiftv2-long-running/scripts/manage_storage_rbac.sh" + arguments: > + assign + ${{ parameters.subscriptionId }} + ${{ parameters.rgName }} + "$(DiscoverStorageAccounts.storageAccount1)" + + - task: AzureCLI@2 + displayName: "Run Private Endpoint Tests" + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + echo "==> Running Private Endpoint connectivity tests for workload type: ${{ parameters.workloadType }}" + export RG="${{ parameters.rgName }}" + export BUILD_ID="${{ parameters.rgName }}" + export WORKLOAD_TYPE="${{ parameters.workloadType }}" + export STORAGE_ACCOUNT_1="$(DiscoverStorageAccounts.storageAccount1)" + export STORAGE_ACCOUNT_2="$(DiscoverStorageAccounts.storageAccount2)" + cd ./test/integration/swiftv2/longRunningCluster + go test -v -timeout=30m -tags=private_endpoint_test + workingDirectory: $(System.DefaultWorkingDirectory) + env: + KUBECONFIG: $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig:$(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig + KUBECONFIG_DIR: 
$(Pipeline.Workspace)/kubeconfigs + + - task: AzureCLI@2 + displayName: "Remove RBAC after tests" + condition: always() + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: scriptPath + scriptPath: ".pipelines/swiftv2-long-running/scripts/manage_storage_rbac.sh" + arguments: > + delete + ${{ parameters.subscriptionId }} + ${{ parameters.rgName }} + "$(DiscoverStorageAccounts.storageAccount1) $(DiscoverStorageAccounts.storageAccount2)" + + - job: ScaleTest + displayName: "Scale Tests - create and delete pods at scale" + dependsOn: + - PrivateEndpointTests + steps: + - task: DownloadPipelineArtifact@2 + displayName: "Download kubeconfig files" + inputs: + artifactName: kubeconfigs-${{ parameters.workloadType }} + targetPath: $(Pipeline.Workspace)/kubeconfigs + + - task: AzureCLI@2 + displayName: "Run Scale Test (Create and Delete)" + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + echo "==> Running scale test for workload type: ${{ parameters.workloadType }}" + echo " - 10 pods in aks-1 (cx_vnet_v1/s1)" + echo " - 10 pods in aks-2 (cx_vnet_v3/s1)" + export RG="${{ parameters.rgName }}" + export BUILD_ID="${{ parameters.rgName }}" + export WORKLOAD_TYPE="${{ parameters.workloadType }}" + cd ./test/integration/swiftv2/longRunningCluster + go test -v -timeout 1h -tags=scale_test + workingDirectory: $(System.DefaultWorkingDirectory) + env: + KUBECONFIG: $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig:$(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig + KUBECONFIG_DIR: $(Pipeline.Workspace)/kubeconfigs + + - job: DeleteTestResources + displayName: "Delete PodNetwork, PNI, and Pods" + dependsOn: + - CreatePods + - ConnectivityTests + - PrivateEndpointTests + - ScaleTest + condition: always() + steps: + - task: DownloadPipelineArtifact@2 + displayName: "Download kubeconfig files" + inputs: + artifactName: kubeconfigs-${{ parameters.workloadType }} + 
targetPath: $(Pipeline.Workspace)/kubeconfigs + + - task: AzureCLI@2 + displayName: "Delete Test Resources" + inputs: + azureSubscription: $(AZURE_SERVICE_CONNECTION) + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + echo "==> Deleting test resources (8 scenarios) for workload type: ${{ parameters.workloadType }}" + export RG="${{ parameters.rgName }}" + export BUILD_ID="${{ parameters.rgName }}" + export WORKLOAD_TYPE="${{ parameters.workloadType }}" + cd ./test/integration/swiftv2/longRunningCluster + go test -v -timeout=1h -tags=delete_test + workingDirectory: $(System.DefaultWorkingDirectory) + env: + KUBECONFIG: $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig:$(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig + KUBECONFIG_DIR: $(Pipeline.Workspace)/kubeconfigs diff --git a/.pipelines/swiftv2-long-running/template/long-running-pipeline-template.yaml b/.pipelines/swiftv2-long-running/template/long-running-pipeline-template.yaml index cfb134b20a..32f5c2fe98 100644 --- a/.pipelines/swiftv2-long-running/template/long-running-pipeline-template.yaml +++ b/.pipelines/swiftv2-long-running/template/long-running-pipeline-template.yaml @@ -8,6 +8,18 @@ parameters: - name: runSetupStages type: boolean default: false + - name: workloadType + type: object + default: [ "swiftv2-linux", "swiftv2-linux-byon" ] + displayName: "Swiftv2 scenarios to run" + + +resources: + repositories: + - repository: Networking-Aquarius + type: git + name: One/Networking-Aquarius + ref: refs/heads/master variables: - name: rgName @@ -130,250 +142,48 @@ stages: ${{ parameters.subscriptionId }} $(rgName) ${{ parameters.location }} - - stage: DataPathTests - displayName: "Stage: Swiftv2 Data Path Tests on Linux Managed Nodes" - dependsOn: AKSClusterAndNetworking - condition: or(eq(${{ parameters.runSetupStages }}, false), succeeded()) - variables: - storageAccount1: $[ 
stageDependencies.AKSClusterAndNetworking.NetworkingAndStorage.outputs['CreateStorageAccounts.StorageAccount1'] ] - storageAccount2: $[ stageDependencies.AKSClusterAndNetworking.NetworkingAndStorage.outputs['CreateStorageAccounts.StorageAccount2'] ] - jobs: - - job: SetupKubeconfig - displayName: "Setup Kubeconfig Files" - steps: - - task: AzureCLI@2 - displayName: "Generate and verify kubeconfig files" - inputs: - azureSubscription: $(AZURE_SERVICE_CONNECTION) - scriptType: bash - scriptLocation: inlineScript - inlineScript: | - mkdir -p $(Pipeline.Workspace)/kubeconfigs - - echo "==> Setting up kubeconfig for cluster aks-1" - az aks get-credentials \ - --resource-group $(rgName) \ - --name aks-1 \ - --file $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig \ - --overwrite-existing \ - --admin - - echo "==> Setting up kubeconfig for cluster aks-2" - az aks get-credentials \ - --resource-group $(rgName) \ - --name aks-2 \ - --file $(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig \ - --overwrite-existing \ - --admin - - echo "==> Verifying cluster aks-1 connectivity" - kubectl --kubeconfig $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig get nodes - - echo "==> Verifying cluster aks-2 connectivity" - kubectl --kubeconfig $(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig get nodes - - - task: PublishPipelineArtifact@1 - displayName: "Publish kubeconfig files" - inputs: - targetPath: $(Pipeline.Workspace)/kubeconfigs - artifactName: kubeconfigs - publishLocation: pipeline - - - job: CreatePods - displayName: "Create Swiftv2 Pods" - dependsOn: SetupKubeconfig - steps: - - task: DownloadPipelineArtifact@2 - displayName: "Download kubeconfig files" - inputs: - artifactName: kubeconfigs - targetPath: $(Pipeline.Workspace)/kubeconfigs - - - task: AzureCLI@2 - displayName: "Create pods" - inputs: - azureSubscription: $(AZURE_SERVICE_CONNECTION) - scriptType: bash - scriptLocation: inlineScript - inlineScript: | - echo "==> Creating pods (8 scenarios)" - export 
RG="$(rgName)" - export BUILD_ID="$(rgName)" - export WORKLOAD_TYPE="swiftv2-linux" - cd ./test/integration/swiftv2/longRunningCluster - go test -v -timeout=1h -tags=create_test - workingDirectory: $(System.DefaultWorkingDirectory) - env: - KUBECONFIG: $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig:$(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig - KUBECONFIG_DIR: $(Pipeline.Workspace)/kubeconfigs - - - job: ConnectivityTests - displayName: "Test Pod-to-Pod Connectivity" - dependsOn: CreatePods + - job: DeployLinuxBYON + displayName: "Join Linux BYON VMSS to AKS Cluster" + dependsOn: + - CreateCluster + - NetworkingAndStorage + pool: + vmImage: ubuntu-latest steps: - - task: DownloadPipelineArtifact@2 - displayName: "Download kubeconfig files" - inputs: - artifactName: kubeconfigs - targetPath: $(Pipeline.Workspace)/kubeconfigs - + - checkout: self + displayName: "Checkout main repository" + - checkout: Networking-Aquarius + displayName: "Checkout Networking-Aquarius repository" - task: AzureCLI@2 - displayName: "Run Connectivity Tests" - inputs: - azureSubscription: $(AZURE_SERVICE_CONNECTION) - scriptType: bash - scriptLocation: inlineScript - inlineScript: | - echo "==> Running connectivity tests" - export RG="$(rgName)" - export BUILD_ID="$(rgName)" - export WORKLOAD_TYPE="swiftv2-linux" - cd ./test/integration/swiftv2/longRunningCluster - go test -v -timeout=30m -tags=connectivity_test - workingDirectory: $(System.DefaultWorkingDirectory) - env: - KUBECONFIG: $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig:$(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig - KUBECONFIG_DIR: $(Pipeline.Workspace)/kubeconfigs - - - job: PrivateEndpointTests - displayName: "Test Private Endpoint Access" - dependsOn: ConnectivityTests - steps: - - task: DownloadPipelineArtifact@2 - displayName: "Download kubeconfig files" - inputs: - artifactName: kubeconfigs - targetPath: $(Pipeline.Workspace)/kubeconfigs - - - task: AzureCLI@2 - name: DiscoverStorageAccounts - 
displayName: "Discover storage accounts" - inputs: - azureSubscription: $(AZURE_SERVICE_CONNECTION) - scriptType: bash - scriptLocation: inlineScript - inlineScript: | - STORAGE_ACCOUNT_1="$(storageAccount1)" - STORAGE_ACCOUNT_2="$(storageAccount2)" - - if [ -z "$STORAGE_ACCOUNT_1" ] || [ -z "$STORAGE_ACCOUNT_2" ]; then - echo "Storage account variables not set, discovering from resource group..." - STORAGE_ACCOUNT_1=$(az storage account list -g $(rgName) --query "[?starts_with(name, 'sa1')].name" -o tsv) - STORAGE_ACCOUNT_2=$(az storage account list -g $(rgName) --query "[?starts_with(name, 'sa2')].name" -o tsv) - echo "Discovered: STORAGE_ACCOUNT_1=$STORAGE_ACCOUNT_1, STORAGE_ACCOUNT_2=$STORAGE_ACCOUNT_2" - fi - - echo "##vso[task.setvariable variable=storageAccount1;isOutput=true]$STORAGE_ACCOUNT_1" - echo "##vso[task.setvariable variable=storageAccount2;isOutput=true]$STORAGE_ACCOUNT_2" - - - task: AzureCLI@2 - displayName: "Assign RBAC for SAS token generation" + displayName: "Deploy Linux VMSS" inputs: azureSubscription: $(AZURE_SERVICE_CONNECTION) scriptType: bash scriptLocation: scriptPath - scriptPath: ".pipelines/swiftv2-long-running/scripts/manage_storage_rbac.sh" + scriptPath: "$(Build.SourcesDirectory)/azure-container-networking/.pipelines/swiftv2-long-running/scripts/deploy_linuxbyon.sh" arguments: > - assign - ${{ parameters.subscriptionId }} $(rgName) - "$(DiscoverStorageAccounts.storageAccount1)" - - - task: AzureCLI@2 - displayName: "Run Private Endpoint Tests" - inputs: - azureSubscription: $(AZURE_SERVICE_CONNECTION) - scriptType: bash - scriptLocation: inlineScript - inlineScript: | - echo "==> Running Private Endpoint connectivity tests" - export RG="$(rgName)" - export BUILD_ID="$(rgName)" - export WORKLOAD_TYPE="swiftv2-linux" - export STORAGE_ACCOUNT_1="$(DiscoverStorageAccounts.storageAccount1)" - export STORAGE_ACCOUNT_2="$(DiscoverStorageAccounts.storageAccount2)" - cd ./test/integration/swiftv2/longRunningCluster - go test -v 
-timeout=30m -tags=private_endpoint_test - workingDirectory: $(System.DefaultWorkingDirectory) - env: - KUBECONFIG: $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig:$(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig - KUBECONFIG_DIR: $(Pipeline.Workspace)/kubeconfigs - - - task: AzureCLI@2 - displayName: "Remove RBAC after tests" - condition: always() - inputs: - azureSubscription: $(AZURE_SERVICE_CONNECTION) - scriptType: bash - scriptLocation: scriptPath - scriptPath: ".pipelines/swiftv2-long-running/scripts/manage_storage_rbac.sh" - arguments: > - delete - ${{ parameters.subscriptionId }} - $(rgName) - "$(DiscoverStorageAccounts.storageAccount1) $(DiscoverStorageAccounts.storageAccount2)" - - - job: ScaleTest - displayName: "Scale Tests - create and delete pods at scale." - dependsOn: - - PrivateEndpointTests - steps: - - task: DownloadPipelineArtifact@2 - displayName: "Download kubeconfig files" - inputs: - artifactName: kubeconfigs - targetPath: $(Pipeline.Workspace)/kubeconfigs - - - task: AzureCLI@2 - displayName: "Run Scale Test (Create and Delete)" - inputs: - azureSubscription: $(AZURE_SERVICE_CONNECTION) - scriptType: bash - scriptLocation: inlineScript - inlineScript: | - echo "==> Running scale test: Create 20 pods with device plugin across both clusters" - echo " - 10 pods in aks-1 (cx_vnet_v1/s1)" - echo " - 10 pods in aks-2 (cx_vnet_v3/s1)" - export RG="$(rgName)" - export BUILD_ID="$(rgName)" - export WORKLOAD_TYPE="swiftv2-linux" - cd ./test/integration/swiftv2/longRunningCluster - go test -v -timeout 1h -tags=scale_test - workingDirectory: $(System.DefaultWorkingDirectory) - env: - KUBECONFIG: $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig:$(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig - KUBECONFIG_DIR: $(Pipeline.Workspace)/kubeconfigs - - - job: DeleteTestResources - displayName: "Delete PodNetwork, PNI, and Pods" - dependsOn: - - CreatePods - - ConnectivityTests - - PrivateEndpointTests - - ScaleTest - condition: always() - steps: - 
- task: DownloadPipelineArtifact@2 - displayName: "Download kubeconfig files" - inputs: - artifactName: kubeconfigs - targetPath: $(Pipeline.Workspace)/kubeconfigs - - - task: AzureCLI@2 - displayName: "Delete Test Resources" - inputs: - azureSubscription: $(AZURE_SERVICE_CONNECTION) - scriptType: bash - scriptLocation: inlineScript - inlineScript: | - echo "==> Deleting test resources (8 scenarios)" - export RG="$(rgName)" - export BUILD_ID="$(rgName)" - export WORKLOAD_TYPE="swiftv2-linux" - cd ./test/integration/swiftv2/longRunningCluster - go test -v -timeout=1h -tags=delete_test - workingDirectory: $(System.DefaultWorkingDirectory) + $(Build.SourcesDirectory) env: - KUBECONFIG: $(Pipeline.Workspace)/kubeconfigs/aks-1.kubeconfig:$(Pipeline.Workspace)/kubeconfigs/aks-2.kubeconfig - KUBECONFIG_DIR: $(Pipeline.Workspace)/kubeconfigs - \ No newline at end of file + SSH_PUBLIC_KEY_SECRET_NAME: $(SSH_PUBLIC_KEY_SECRET_NAME) + CLUSTER_KUBECONFIG_KEYVAULT_NAME: $(CLUSTER_KUBECONFIG_KEYVAULT_NAME) + KEY_VAULT_RESOURCE_GROUP: $(KEY_VAULT_RESOURCE_GROUP) + KEY_VAULT_SUBSCRIPTION: $(KEY_VAULT_SUBSCRIPTION) + + # Run tests for each workload type specified in the parameter + - ${{ each workload in parameters.workloadType }}: + - template: datapath-tests-stage.yaml + parameters: + workloadType: ${{ workload }} + subscriptionId: ${{ parameters.subscriptionId }} + rgName: $(rgName) + storageAccount1: $[ stageDependencies.AKSClusterAndNetworking.NetworkingAndStorage.outputs['CreateStorageAccounts.StorageAccount1'] ] + storageAccount2: $[ stageDependencies.AKSClusterAndNetworking.NetworkingAndStorage.outputs['CreateStorageAccounts.StorageAccount2'] ] + runSetupStages: ${{ parameters.runSetupStages }} + ${{ if eq(workload, 'swiftv2-linux') }}: + dependsOn: + - AKSClusterAndNetworking + ${{ else }}: + dependsOn: + - AKSClusterAndNetworking + - DataPathTests_swiftv2_linux \ No newline at end of file diff --git a/test/integration/swiftv2/longRunningCluster/datapath.go 
b/test/integration/swiftv2/longRunningCluster/datapath.go index e37aafa163..f021bcfc68 100644 --- a/test/integration/swiftv2/longRunningCluster/datapath.go +++ b/test/integration/swiftv2/longRunningCluster/datapath.go @@ -141,8 +141,8 @@ func isValidWorkloadType(workloadType string) bool { validTypes := []string{ "swiftv2-linux", "swiftv2-windows", - "swiftv2-linux-byocni", - "swiftv2-windows-byocni", + "swiftv2-linux-byon", + "swiftv2-windows-byon", } for _, validType := range validTypes { From a03d95aa94101ef45476149ee7871a5918b73a49 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Jan 2026 23:08:41 +0000 Subject: [PATCH 45/47] deps: bump the all-go-minor-and-patch group across 1 directory with 2 updates (#4159) Bumps the all-go-minor-and-patch group with 2 updates in the /dropgz directory: [github.com/spf13/cobra](https://github.com/spf13/cobra) and [go.uber.org/zap](https://github.com/uber-go/zap). Updates `github.com/spf13/cobra` from 1.10.1 to 1.10.2 - [Release notes](https://github.com/spf13/cobra/releases) - [Commits](https://github.com/spf13/cobra/compare/v1.10.1...v1.10.2) Updates `go.uber.org/zap` from 1.27.0 to 1.27.1 - [Release notes](https://github.com/uber-go/zap/releases) - [Changelog](https://github.com/uber-go/zap/blob/master/CHANGELOG.md) - [Commits](https://github.com/uber-go/zap/compare/v1.27.0...v1.27.1) --- updated-dependencies: - dependency-name: github.com/spf13/cobra dependency-version: 1.10.2 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all-go-minor-and-patch - dependency-name: go.uber.org/zap dependency-version: 1.27.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: all-go-minor-and-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dropgz/go.mod | 4 ++-- dropgz/go.sum | 9 +++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/dropgz/go.mod b/dropgz/go.mod index 2048a2df30..849d916960 100644 --- a/dropgz/go.mod +++ b/dropgz/go.mod @@ -5,8 +5,8 @@ go 1.24.0 require ( github.com/jsternberg/zap-logfmt v1.3.0 github.com/pkg/errors v0.9.1 - github.com/spf13/cobra v1.10.1 - go.uber.org/zap v1.27.0 + github.com/spf13/cobra v1.10.2 + go.uber.org/zap v1.27.1 ) require ( diff --git a/dropgz/go.sum b/dropgz/go.sum index 1d45879985..8a01cb1aa2 100644 --- a/dropgz/go.sum +++ b/dropgz/go.sum @@ -10,8 +10,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= -github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= @@ -20,8 +20,9 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap 
v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From 931a2455a96aa822bd54fe0a5a22277ac38f46d1 Mon Sep 17 00:00:00 2001 From: Isaac Date: Fri, 9 Jan 2026 11:43:14 -0800 Subject: [PATCH 46/47] fix: infinite loop in Device Plugin caused by stale SocketWatcher state (#4163) * fix: remove closed socker watcher from slice Signed-off-by: GitHub * fix: remove the socket if it already exists Signed-off-by: GitHub * test: update test comments and error message Signed-off-by: GitHub * fix: lint Signed-off-by: GitHub * lint: fix err Signed-off-by: GitHub * nit: tempDir() Signed-off-by: GitHub * test: use local testdata dir for socket tests to fix Windows CI issues Replaces usage of `t.TempDir()` with a local `testdata` directory for temporary socket files in `socketwatcher_test.go` and `server_test.go`. This change addresses two issues on Windows: 1. `t.TempDir()` creates files in the system temp directory, which can cause cleanup failures and disk space issues on CI agents if files are left behind. 2. Explicitly closes file handles after `os.Create()` to prevent "Access is denied" errors during cleanup, as Windows does not allow deleting open files. Also adds a `.gitignore` to `cns/deviceplugin/testdata` to ensure temporary socket files are not tracked. 
Signed-off-by: GitHub * lint: fix err name Signed-off-by: GitHub --------- Signed-off-by: GitHub --- cns/deviceplugin/server.go | 6 ++ cns/deviceplugin/server_test.go | 69 +++++++++++++++ cns/deviceplugin/socketwatcher.go | 7 +- cns/deviceplugin/socketwatcher_test.go | 111 ++++++++++++++++++++----- cns/deviceplugin/testdata/.gitignore | 1 + cns/deviceplugin/testdata/socket.sock | 0 6 files changed, 171 insertions(+), 23 deletions(-) create mode 100644 cns/deviceplugin/server_test.go create mode 100644 cns/deviceplugin/testdata/.gitignore delete mode 100644 cns/deviceplugin/testdata/socket.sock diff --git a/cns/deviceplugin/server.go b/cns/deviceplugin/server.go index a07ee41043..fe22607fa2 100644 --- a/cns/deviceplugin/server.go +++ b/cns/deviceplugin/server.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net" + "os" "time" "github.com/pkg/errors" @@ -48,6 +49,11 @@ func (s *Server) Run(ctx context.Context) error { defer cancel() s.shutdownCh = childCtx.Done() + // remove the socket if it already exists + if err := os.Remove(s.address); err != nil && !os.IsNotExist(err) { + return errors.Wrap(err, "error removing socket") + } + l, err := net.Listen("unix", s.address) if err != nil { return errors.Wrap(err, "error listening on socket") diff --git a/cns/deviceplugin/server_test.go b/cns/deviceplugin/server_test.go new file mode 100644 index 0000000000..b3d044baca --- /dev/null +++ b/cns/deviceplugin/server_test.go @@ -0,0 +1,69 @@ +package deviceplugin + +import ( + "context" + "net" + "os" + "path/filepath" + "testing" + "time" + + "go.uber.org/zap" +) + +type mockDeviceCounter struct { + count int +} + +func (m *mockDeviceCounter) getDeviceCount() int { + return m.count +} + +func TestServer_Run_CleansUpExistingSocket(t *testing.T) { + // Create a temporary directory for the socket + socketPath := filepath.Join("testdata", "test.sock") + defer os.Remove(socketPath) + + // Create a dummy file at the socket path to simulate a stale socket + if err := 
os.WriteFile(socketPath, []byte("stale socket"), 0o600); err != nil { + t.Fatalf("failed to create dummy socket file: %v", err) + } + + logger := zap.NewNop() + counter := &mockDeviceCounter{count: 1} + server := NewServer(logger, socketPath, counter, time.Second) + + // Create a context that we can cancel to stop the server + ctx, cancel := context.WithCancel(context.Background()) + + // Run the server in a goroutine + errChan := make(chan error) + go func() { + errChan <- server.Run(ctx) + }() + + // Wait for the server to start up, delete the pre-existing file and recreate it as a socket + // We verify this by trying to connect to the socket repeatedly until success or timeout + var conn net.Conn + var err error + // Retry for up to 2 seconds + for start := time.Now(); time.Since(start) < 2*time.Second; time.Sleep(200 * time.Millisecond) { + conn, err = net.Dial("unix", socketPath) + if err == nil { + conn.Close() + break + } + } + + if err != nil { + t.Errorf("failed to connect to socket: %v", err) + } + + // Stop the server + cancel() + + // Wait for Run to return + if err := <-errChan; err != nil { + t.Errorf("server.Run returned error: %v", err) + } +} diff --git a/cns/deviceplugin/socketwatcher.go b/cns/deviceplugin/socketwatcher.go index 05b7df602b..5d1c7d621b 100644 --- a/cns/deviceplugin/socketwatcher.go +++ b/cns/deviceplugin/socketwatcher.go @@ -56,7 +56,12 @@ func (s *SocketWatcher) WatchSocket(ctx context.Context, socket string) <-chan s socketChan := make(chan struct{}) s.socketChans[socket] = socketChan go func() { - defer close(socketChan) + defer func() { + s.mutex.Lock() + delete(s.socketChans, socket) + s.mutex.Unlock() + close(socketChan) + }() ticker := time.NewTicker(s.options.statInterval) defer ticker.Stop() for { diff --git a/cns/deviceplugin/socketwatcher_test.go b/cns/deviceplugin/socketwatcher_test.go index e987358481..4275734726 100644 --- a/cns/deviceplugin/socketwatcher_test.go +++ b/cns/deviceplugin/socketwatcher_test.go @@ -12,12 
+12,23 @@ import ( ) func TestWatchContextCancelled(t *testing.T) { + socket := filepath.Join("testdata", "socket.sock") + f, createErr := os.Create(socket) + if createErr != nil { + t.Fatalf("error creating test file %s: %v", socket, createErr) + } + f.Close() + defer os.Remove(socket) + ctx, cancel := context.WithCancel(context.Background()) - logger, _ := zap.NewDevelopment() + logger, err := zap.NewDevelopment() + if err != nil { + t.Fatalf("failed to create logger: %v", err) + } s := deviceplugin.NewSocketWatcher(logger) done := make(chan struct{}) go func(done chan struct{}) { - <-s.WatchSocket(ctx, "testdata/socket.sock") + <-s.WatchSocket(ctx, socket) close(done) }(done) @@ -39,19 +50,18 @@ func TestWatchContextCancelled(t *testing.T) { } func TestWatchSocketDeleted(t *testing.T) { - // Create a temporary directory - tempDir, err := os.MkdirTemp("", "socket-watcher-test-") - if err != nil { - t.Fatalf("error creating temporary directory: %v", err) + socket := filepath.Join("testdata", "to-be-deleted.sock") + f, createErr := os.Create(socket) + if createErr != nil { + t.Fatalf("error creating test file %s: %v", socket, createErr) } - defer os.RemoveAll(tempDir) // Ensure the directory is cleaned up + f.Close() + defer os.Remove(socket) - socket := filepath.Join(tempDir, "to-be-deleted.sock") - if _, err := os.Create(socket); err != nil { - t.Fatalf("error creating test file %s: %v", socket, err) + logger, err := zap.NewDevelopment() + if err != nil { + t.Fatalf("failed to create logger: %v", err) } - - logger, _ := zap.NewDevelopment() s := deviceplugin.NewSocketWatcher(logger, deviceplugin.SocketWatcherStatInterval(time.Second)) done := make(chan struct{}) go func(done chan struct{}) { @@ -79,19 +89,18 @@ func TestWatchSocketDeleted(t *testing.T) { } func TestWatchSocketTwice(t *testing.T) { - // Create a temporary directory - tempDir, err := os.MkdirTemp("", "socket-watcher-test-") - if err != nil { - t.Fatalf("error creating temporary directory: %v", err) 
+ socket := filepath.Join("testdata", "to-be-deleted.sock") + f, createErr := os.Create(socket) + if createErr != nil { + t.Fatalf("error creating test file %s: %v", socket, createErr) } - defer os.RemoveAll(tempDir) // Ensure the directory is cleaned up + f.Close() + defer os.Remove(socket) - socket := filepath.Join(tempDir, "to-be-deleted.sock") - if _, err := os.Create(socket); err != nil { - t.Fatalf("error creating test file %s: %v", socket, err) + logger, err := zap.NewDevelopment() + if err != nil { + t.Fatalf("failed to create logger: %v", err) } - - logger, _ := zap.NewDevelopment() s := deviceplugin.NewSocketWatcher(logger, deviceplugin.SocketWatcherStatInterval(time.Second)) done1 := make(chan struct{}) done2 := make(chan struct{}) @@ -134,3 +143,61 @@ func TestWatchSocketTwice(t *testing.T) { t.Fatal("socket watcher is still watching 5 seconds after file is deleted") } } + +func TestWatchSocketCleanup(t *testing.T) { + socket := filepath.Join("testdata", "to-be-deleted.sock") + f, createErr := os.Create(socket) + if createErr != nil { + t.Fatalf("error creating test file %s: %v", socket, createErr) + } + f.Close() + defer os.Remove(socket) + + logger, err := zap.NewDevelopment() + if err != nil { + t.Fatalf("failed to create logger: %v", err) + } + // Use a short interval for faster test execution + s := deviceplugin.NewSocketWatcher(logger, deviceplugin.SocketWatcherStatInterval(100*time.Millisecond)) + + // 1. Watch the socket + ch1 := s.WatchSocket(context.Background(), socket) + + // Verify it's open + select { + case <-ch1: + t.Fatal("channel should be open initially") + default: + } + + // 2. Delete the socket to trigger watcher exit + if removeErr := os.Remove(socket); removeErr != nil { + t.Fatalf("failed to remove socket: %v", removeErr) + } + + // 3. Wait for ch1 to close + select { + case <-ch1: + // Expected + case <-time.After(2 * time.Second): + t.Fatal("timed out waiting for watcher to detect socket deletion") + } + + // 4. 
Recreate the socket + f, err = os.Create(socket) + if err != nil { + t.Fatalf("error recreating test file %s: %v", socket, err) + } + f.Close() + + // 5. Watch the socket again + ch2 := s.WatchSocket(context.Background(), socket) + + // 6. Verify ch2 is open + select { + case <-ch2: + t.Fatal("channel is closed but expected to be open") + case <-time.After(200 * time.Millisecond): + // Wait for at least one tick to ensure the watcher has had a chance to run. + } +} diff --git a/cns/deviceplugin/testdata/.gitignore b/cns/deviceplugin/testdata/.gitignore new file mode 100644 index 0000000000..c74d682773 --- /dev/null +++ b/cns/deviceplugin/testdata/.gitignore @@ -0,0 +1 @@ +*.sock diff --git a/cns/deviceplugin/testdata/socket.sock b/cns/deviceplugin/testdata/socket.sock deleted file mode 100644 index e69de29bb2..0000000000 From 418e025f7bebff7215206de3f8b6273fa0a77b91 Mon Sep 17 00:00:00 2001 From: Esteban Capillo Date: Mon, 26 Jan 2026 13:57:44 -0600 Subject: [PATCH 47/47] feat: added code for multitenancy cni to create dual stack endpoints --- cni/network/multitenancy.go | 72 +++++++++++++++++++++++++++----- cni/network/multitenancy_mock.go | 4 +- cni/network/network.go | 18 ++++++-- network/endpoint.go | 5 ++- 4 files changed, 81 insertions(+), 18 deletions(-) diff --git a/cni/network/multitenancy.go b/cni/network/multitenancy.go index e617afb7d6..438e0e4ee9 100644 --- a/cni/network/multitenancy.go +++ b/cni/network/multitenancy.go @@ -226,8 +226,8 @@ func (m *Multitenancy) GetAllNetworkContainers( HostSubnetPrefix: hostSubnetPrefixes[i], } - ipconfig, routes := convertToIPConfigAndRouteInfo(ifInfo.NCResponse) - ifInfo.IPConfigs = append(ifInfo.IPConfigs, ipconfig) + ipconfigs, routes := convertToIPConfigAndRouteInfo(ifInfo.NCResponse) + ifInfo.IPConfigs = append(ifInfo.IPConfigs, ipconfigs...) 
ifInfo.Routes = routes ifInfo.NICType = cns.InfraNIC ifInfo.SkipDefaultRoutes = ncResponses[i].SkipDefaultRoutes @@ -319,18 +319,68 @@ func convertToCniResult(networkConfig *cns.GetNetworkContainerResponse, ifName s return result } -func convertToIPConfigAndRouteInfo(networkConfig *cns.GetNetworkContainerResponse) (*network.IPConfig, []network.RouteInfo) { - ipconfig := &network.IPConfig{} +func convertToIPConfigAndRouteInfo(networkConfig *cns.GetNetworkContainerResponse) ([]*network.IPConfig, []network.RouteInfo) { + ipconfigs := []*network.IPConfig{} cnsIPConfig := networkConfig.IPConfiguration ipAddr := net.ParseIP(cnsIPConfig.IPSubnet.IPAddress) - if ipAddr.To4() != nil { - ipconfig.Address = net.IPNet{IP: ipAddr, Mask: net.CIDRMask(int(cnsIPConfig.IPSubnet.PrefixLength), ipv4FullMask)} - } else { - ipconfig.Address = net.IPNet{IP: ipAddr, Mask: net.CIDRMask(int(cnsIPConfig.IPSubnet.PrefixLength), ipv6FullMask)} + // Create primary IPConfig (always IPv4 in current scenarios) + primaryIPConfig := &network.IPConfig{ + Address: net.IPNet{IP: ipAddr, Mask: net.CIDRMask(int(cnsIPConfig.IPSubnet.PrefixLength), ipv4FullMask)}, + Gateway: net.ParseIP(cnsIPConfig.GatewayIPAddress), } + ipconfigs = append(ipconfigs, primaryIPConfig) + + // Process SecondaryIPConfigs + // For dual-stack scenarios in multitenancy: + // - If a secondary is IPv6, create a separate IPConfig with IPv6 gateway + // - IPv4 secondaries are added to SecondaryIPs array of primary IPConfig + if len(networkConfig.SecondaryIPConfigs) > 0 { + var ipv6DualStackConfig *network.IPConfig + + for _, secondaryIPConfig := range networkConfig.SecondaryIPConfigs { + secondaryIP := net.ParseIP(secondaryIPConfig.IPAddress) + if secondaryIP == nil { + logger.Error("Failed to parse secondary IP address", + zap.String("ipAddress", secondaryIPConfig.IPAddress)) + continue + } - ipconfig.Gateway = net.ParseIP(cnsIPConfig.GatewayIPAddress) + // Check if this is IPv6 for dual-stack + if secondaryIP.To4() == nil { + // 
IPv6 secondary - create separate IPConfig for dual-stack + if ipv6DualStackConfig == nil { + ipv6DualStackConfig = &network.IPConfig{ + Address: net.IPNet{ + IP: secondaryIP, + Mask: net.CIDRMask(112, ipv6FullMask), // Default /112 for pod IPv6 + }, + Gateway: net.ParseIP(cnsIPConfig.GatewayIPv6Address), + } + logger.Info("Created separate IPv6 IPConfig for dual-stack", + zap.String("ipv6Address", secondaryIP.String()), + zap.String("ipv6Gateway", cnsIPConfig.GatewayIPv6Address)) + } else { + // Additional IPv6 addresses go to IPv6 IPConfig SecondaryIPs + secondaryIPNet := net.IPNet{IP: secondaryIP, Mask: net.CIDRMask(112, ipv6FullMask)} + ipv6DualStackConfig.SecondaryIPs = append(ipv6DualStackConfig.SecondaryIPs, secondaryIPNet) + logger.Info("Added additional IPv6 secondary IP", + zap.String("secondaryIP", secondaryIPNet.String())) + } + } else { + // IPv4 secondary - add to primary IPConfig SecondaryIPs array + secondaryIPNet := net.IPNet{IP: secondaryIP, Mask: net.CIDRMask(int(cnsIPConfig.IPSubnet.PrefixLength), ipv4FullMask)} + primaryIPConfig.SecondaryIPs = append(primaryIPConfig.SecondaryIPs, secondaryIPNet) + logger.Info("Added IPv4 secondary IP", + zap.String("secondaryIP", secondaryIPNet.String())) + } + } + + // Add the IPv6 dual-stack config as a separate IPConfig + if ipv6DualStackConfig != nil { + ipconfigs = append(ipconfigs, ipv6DualStackConfig) + } + } routes := make([]network.RouteInfo, 0) if networkConfig.Routes != nil && len(networkConfig.Routes) > 0 { @@ -343,10 +393,10 @@ func convertToIPConfigAndRouteInfo(networkConfig *cns.GetNetworkContainerRespons for _, ipRouteSubnet := range networkConfig.CnetAddressSpace { routeIPnet := net.IPNet{IP: net.ParseIP(ipRouteSubnet.IPAddress), Mask: net.CIDRMask(int(ipRouteSubnet.PrefixLength), ipv4FullMask)} - routes = append(routes, network.RouteInfo{Dst: routeIPnet, Gw: ipconfig.Gateway}) + routes = append(routes, network.RouteInfo{Dst: routeIPnet, Gw: primaryIPConfig.Gateway}) } - return ipconfig, routes 
+ return ipconfigs, routes } func checkIfSubnetOverlaps(enableInfraVnet bool, nwCfg *cni.NetworkConfig, cnsNetworkConfig *cns.GetNetworkContainerResponse) bool { diff --git a/cni/network/multitenancy_mock.go b/cni/network/multitenancy_mock.go index 13107a3357..aafba47be5 100644 --- a/cni/network/multitenancy_mock.go +++ b/cni/network/multitenancy_mock.go @@ -93,8 +93,8 @@ func (m *MockMultitenancy) GetAllNetworkContainers( HostSubnetPrefix: ipNets[i], } - ipconfig, routes := convertToIPConfigAndRouteInfo(ifInfo.NCResponse) - ifInfo.IPConfigs = append(ifInfo.IPConfigs, ipconfig) + ipconfigs, routes := convertToIPConfigAndRouteInfo(ifInfo.NCResponse) + ifInfo.IPConfigs = append(ifInfo.IPConfigs, ipconfigs...) ifInfo.Routes = routes ifInfo.NICType = cns.InfraNIC diff --git a/cni/network/network.go b/cni/network/network.go index 5eb4552b91..6fb7b38786 100644 --- a/cni/network/network.go +++ b/cni/network/network.go @@ -704,9 +704,21 @@ func (plugin *NetPlugin) createEpInfo(opt *createEpInfoOpt) (*network.EndpointIn // for secondary (Populate addresses) // initially only for infra nic but now applied to all nic types - addresses := make([]net.IPNet, len(opt.ifInfo.IPConfigs)) - for i, ipconfig := range opt.ifInfo.IPConfigs { - addresses[i] = ipconfig.Address + addresses := make([]net.IPNet, 0, len(opt.ifInfo.IPConfigs)) + for _, ipconfig := range opt.ifInfo.IPConfigs { + // Add the primary IP address + addresses = append(addresses, ipconfig.Address) + + // Add any secondary IPs (both IPv4 and IPv6) to the same interface + // This enables dual-stack support for Windows multitenancy Swift v1 mode + if len(ipconfig.SecondaryIPs) > 0 { + for _, secondaryIP := range ipconfig.SecondaryIPs { + addresses = append(addresses, secondaryIP) + logger.Info("Added secondary IP to endpoint addresses", + zap.String("secondaryIP", secondaryIP.String()), + zap.String("nicType", string(opt.ifInfo.NICType))) + } + } } // generate endpoint info diff --git a/network/endpoint.go 
b/network/endpoint.go index 706069d7a1..47c0f61417 100644 --- a/network/endpoint.go +++ b/network/endpoint.go @@ -146,8 +146,9 @@ type InterfaceInfo struct { } type IPConfig struct { - Address net.IPNet - Gateway net.IP + Address net.IPNet + Gateway net.IP + SecondaryIPs []net.IPNet // Additional IPv4 and IPv6 addresses for the interface } type apipaClient interface {