From 85cecb6e6185c147fe35e91622b8de64e97de6dc Mon Sep 17 00:00:00 2001 From: Aaron U'Ren Date: Wed, 5 Oct 2022 00:20:57 -0500 Subject: [PATCH] feat(pod_cidr): handle multiple pod CIDRs --- .../proxy/network_services_controller.go | 10 +- .../routing/network_routes_controller.go | 103 +-- pkg/utils/cni.go | 403 +++++++++ pkg/utils/cni_test.go | 771 ++++++++++++++++++ pkg/utils/pod_cidr.go | 158 +--- pkg/utils/pod_cidr_test.go | 272 +++--- 6 files changed, 1392 insertions(+), 325 deletions(-) create mode 100644 pkg/utils/cni.go create mode 100644 pkg/utils/cni_test.go diff --git a/pkg/controllers/proxy/network_services_controller.go b/pkg/controllers/proxy/network_services_controller.go index 23645fa0..fa9109f4 100644 --- a/pkg/controllers/proxy/network_services_controller.go +++ b/pkg/controllers/proxy/network_services_controller.go @@ -2323,9 +2323,13 @@ func NewNetworkServicesController(clientset kubernetes.Interface, } if config.RunRouter { - cidr, err := utils.GetPodCidrFromNodeSpec(nsc.client, config.HostnameOverride) + node, err := utils.GetNodeObject(nsc.client, config.HostnameOverride) if err != nil { - return nil, fmt.Errorf("failed to get pod CIDR details from Node.spec: %s", err.Error()) + return nil, fmt.Errorf("failed to get node object due to: %v", err.Error()) + } + cidr, err := utils.GetPodCidrFromNodeSpec(node) + if err != nil { + return nil, fmt.Errorf("failed to get pod CIDR details from Node.spec: %v", err) } nsc.podCidr = cidr } @@ -2334,7 +2338,7 @@ func NewNetworkServicesController(clientset kubernetes.Interface, for i, excludedCidr := range config.ExcludedCidrs { _, ipnet, err := net.ParseCIDR(excludedCidr) if err != nil { - return nil, fmt.Errorf("failed to get excluded CIDR details: %s", err.Error()) + return nil, fmt.Errorf("failed to get excluded CIDR details: %v", err) } nsc.excludedCidrs[i] = *ipnet } diff --git a/pkg/controllers/routing/network_routes_controller.go b/pkg/controllers/routing/network_routes_controller.go index b2e18f44..88de72ce 100644 --- a/pkg/controllers/routing/network_routes_controller.go +++ b/pkg/controllers/routing/network_routes_controller.go @@ -2,13 +2,11 @@ package routing import ( "context" - "encoding/json" "errors" "fmt" "net" "os" "os/exec" - "reflect" "strconv" "strings" "sync" @@ -23,13 +21,12 @@ import ( "github.com/coreos/go-iptables/iptables" gobgpapi "github.com/osrg/gobgp/v3/api" gobgp "github.com/osrg/gobgp/v3/pkg/server" - "k8s.io/klog/v2" - "github.com/prometheus/client_golang/prometheus" "github.com/vishvananda/netlink" v1core "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" ) const ( @@ -130,6 +127,8 @@ type NetworkRoutingController struct { localAddressList []string overrideNextHop bool podCidr string + podIPv4CIDRs []string + podIPv6CIDRs []string CNIFirewallSetup *sync.Cond ipsetMutex *sync.Mutex routeSyncer *routeSyncer @@ -251,8 +250,14 @@ func (nrc *NetworkRoutingController) Run(healthChan chan<- *healthcheck.Controll currentMTU := kubeBridgeIf.Attrs().MTU if currentMTU > 0 && currentMTU != mtu { klog.Warningf("Updating config file with current MTU for kube-bridge: %d", currentMTU) - if err := nrc.configureMTU(currentMTU); err != nil { - klog.Errorf("Failed to update config file due to: %s", err.Error()) + cniNetConf, err := utils.NewCNINetworkConfig(nrc.cniConfFile) + if err == nil { + cniNetConf.SetMTU(currentMTU) + if err = cniNetConf.WriteCNIConfig(); err != nil { + klog.Errorf("Failed to update CNI config file due to: %v", err) + } + } else { + 
klog.Errorf("Failed to load CNI config file to reset MTU due to: %v", err) } } } @@ -382,75 +387,42 @@ func (nrc *NetworkRoutingController) Run(healthChan chan<- *healthcheck.Controll } func (nrc *NetworkRoutingController) updateCNIConfig() { - cidr, err := utils.GetPodCidrFromCniSpec(nrc.cniConfFile) + // Parse the existing IPAM CIDRs from the CNI conf file + cniNetConf, err := utils.NewCNINetworkConfig(nrc.cniConfFile) if err != nil { - klog.Errorf("Failed to get pod CIDR from CNI conf file: %s", err) + klog.Errorf("failed to parse CNI Config: %v", err) } - if reflect.DeepEqual(cidr, net.IPNet{}) { - klog.Infof("`subnet` in CNI conf file is empty so populating `subnet` in CNI conf file with pod " + - "CIDR assigned to the node obtained from node spec.") - } - cidrlen, _ := cidr.Mask.Size() - oldCidr := cidr.IP.String() + "/" + strconv.Itoa(cidrlen) - - currentCidr := nrc.podCidr - - if len(cidr.IP) == 0 || strings.Compare(oldCidr, currentCidr) != 0 { - err = utils.InsertPodCidrInCniSpec(nrc.cniConfFile, currentCidr) + // Insert any IPv4 CIDRs that are missing from the IPAM configuration in the CNI + for _, ipv4CIDR := range nrc.podIPv4CIDRs { + err = cniNetConf.InsertPodCIDRIntoIPAM(ipv4CIDR) if err != nil { - klog.Fatalf("Failed to insert `subnet`(pod CIDR) into CNI conf file: %s", err.Error()) + klog.Fatalf("failed to insert IPv4 `subnet`(pod CIDR) '%s' into CNI conf file: %v", ipv4CIDR, err) + } + } + + // Insert any IPv4 CIDRs that are missing from the IPAM configuration in the CNI + for _, ipv6CIDR := range nrc.podIPv6CIDRs { + err = cniNetConf.InsertPodCIDRIntoIPAM(ipv6CIDR) + if err != nil { + klog.Fatalf("failed to insert IPv6 `subnet`(pod CIDR) '%s' into CNI conf file: %v", ipv6CIDR, err) } } if nrc.autoMTU { - err = nrc.autoConfigureMTU() + // Get the MTU by looking at the node's interface that is associated with the primary IP of the cluster + mtu, err := utils.GetMTUFromNodeIP(nrc.nodeIP) if err != nil { - klog.Errorf("Failed to auto-configure MTU due to: %s", err.Error()) + klog.Fatalf("failed to generate MTU: %v", err) } - } -} -func (nrc *NetworkRoutingController) configureMTU(mtu int) error { - file, err := os.ReadFile(nrc.cniConfFile) - if err != nil { - return fmt.Errorf("failed to load CNI conf file: %s", err.Error()) + cniNetConf.SetMTU(mtu) } - var config interface{} - err = json.Unmarshal(file, &config) - if err != nil { - return fmt.Errorf("failed to parse JSON from CNI conf file: %s", err.Error()) - } - if strings.HasSuffix(nrc.cniConfFile, ".conflist") { - configMap := config.(map[string]interface{}) - for key := range configMap { - if key != "plugins" { - continue - } - pluginConfigs := configMap["plugins"].([]interface{}) - for _, pluginConfig := range pluginConfigs { - pluginConfigMap := pluginConfig.(map[string]interface{}) - pluginConfigMap["mtu"] = mtu - } - } - } else { - pluginConfig := config.(map[string]interface{}) - pluginConfig["mtu"] = mtu - } - configJSON, _ := json.Marshal(config) - err = os.WriteFile(nrc.cniConfFile, configJSON, 0644) - if err != nil { - return fmt.Errorf("failed to insert `mtu` into CNI conf file: %s", err.Error()) - } - return nil -} -func (nrc *NetworkRoutingController) autoConfigureMTU() error { - mtu, err := utils.GetMTUFromNodeIP(nrc.nodeIP) + err = cniNetConf.WriteCNIConfig() if err != nil { - return fmt.Errorf("failed to generate MTU: %s", err.Error()) + klog.Fatalf("failed to write CNI file: %v", err) } - return nrc.configureMTU(mtu) } func (nrc *NetworkRoutingController) watchBgpUpdates() { @@ -1277,14 +1249,21 @@ func 
NewNetworkRoutingController(clientset kubernetes.Interface, } } - cidr, err := utils.GetPodCidrFromNodeSpec(clientset, nrc.hostnameOverride) + cidr, err := utils.GetPodCidrFromNodeSpec(node) if err != nil { klog.Fatalf("Failed to get pod CIDR from node spec. kube-router relies on kube-controller-manager to "+ "allocate pod CIDR for the node or an annotation `kube-router.io/pod-cidr`. Error: %v", err) - return nil, fmt.Errorf("failed to get pod CIDR details from Node.spec: %s", err.Error()) + return nil, fmt.Errorf("failed to get pod CIDR details from Node.spec: %v", err) } nrc.podCidr = cidr + nrc.podIPv4CIDRs, nrc.podIPv6CIDRs, err = utils.GetPodCIDRsFromNodeSpecDualStack(node) + if err != nil { + klog.Fatalf("Failed to get pod CIDRs from node spec. kube-router relies on kube-controller-manager to "+ "allocate pod CIDRs for the node or an annotation `kube-router.io/pod-cidrs`. Error: %v", err) + return nil, fmt.Errorf("failed to get pod CIDRs details from Node.spec: %v", err) + } + nrc.ipSetHandler, err = utils.NewIPSet(nrc.isIpv6) if err != nil { return nil, err diff --git a/pkg/utils/cni.go b/pkg/utils/cni.go new file mode 100644 index 00000000..81d4a05e --- /dev/null +++ b/pkg/utils/cni.go @@ -0,0 +1,403 @@ +package utils + +import ( + "encoding/json" + "fmt" + "net" + "os" + "reflect" + "strings" +) + +const ( + noIPsSpecifiedErrorMsg = "no IP ranges specified" +) + +type cniNetworkConfig struct { + FilePath string + Conf *Conf + ConfList *ConfList +} + +func NewCNINetworkConfig(cniConfFilePath string) (*cniNetworkConfig, error) { + cniNetConf := cniNetworkConfig{ + FilePath: cniConfFilePath, + } + + cniFileBytes, err := os.ReadFile(cniConfFilePath) + if err != nil { + return nil, fmt.Errorf("error reading %s: %v", cniConfFilePath, err) + } + + // If we're working with a conflist setup + if cniNetConf.IsConfList() { + confList := new(ConfList) + err = json.Unmarshal(cniFileBytes, confList) + if err != nil { + return nil, fmt.Errorf("failed to load CNI conflist file: %v", err) + } + if len(confList.Plugins) == 0 { + return nil, fmt.Errorf("CNI config list %s has no plugins", cniConfFilePath) + } + cniNetConf.ConfList = confList + } else { + // If we're working with a conf setup + conf := new(Conf) + err = json.Unmarshal(cniFileBytes, conf) + if err != nil { + return nil, fmt.Errorf("failed to load CNI conf file: %v", err) + } + if conf.Type == "" { + return nil, fmt.Errorf("error load CNI config, file appears to have no type: %s", cniConfFilePath) + } + cniNetConf.Conf = conf + } + + if err = cniNetConf.consolidateSubnets(); err != nil { + return nil, err + } + + return &cniNetConf, nil +} + +// consolidateSubnets Many people still define the legacy single subnet variation of the IPAM plugin instead of the +// newer ranges variation. To account for this and make parsing simpler, we do the same thing that the official IPAM +// config loader does and collapse them into ranges. +func (c *cniNetworkConfig) consolidateSubnets() error { + brPlug := c.getBridgePlugin() + if brPlug.IPAM.Subnet != "" { + err := c.InsertPodCIDRIntoIPAM(brPlug.IPAM.Subnet) + if err != nil { + return err + } + brPlug.IPAM.Subnet = "" + delete(brPlug.IPAM.raw, "subnet") + } + + return nil +} + +// IsConfList checks to see if this CNI configuration is a *.conflist file or if it is a *.conf file. Returns true for +// *.conflist, returns false for anything else. 
+func (c *cniNetworkConfig) IsConfList() bool { + return strings.HasSuffix(strings.ToLower(c.FilePath), ".conflist") +} + +// getPodCIDRsMapFromCNISpec gets pod CIDR allocated to the node as a map from CNI spec file and returns it +func (c *cniNetworkConfig) getPodCIDRsMapFromCNISpec() (map[string]*net.IPNet, error) { + podCIDRs := make(map[string]*net.IPNet) + var err error + + ipamConfig := c.getBridgePlugin().IPAM + if err != nil { + if err.Error() != noIPsSpecifiedErrorMsg { + return nil, err + } + return nil, nil + } + + // Parse ranges from ipamConfig + if ipamConfig != nil && len(ipamConfig.Ranges) > 0 { + for _, rangeSet := range ipamConfig.Ranges { + for _, item := range rangeSet { + if item.Subnet != "" { + _, netCIDR, err := net.ParseCIDR(item.Subnet) + if err != nil { + return nil, fmt.Errorf("unable to parse CIDR '%s' contained in CNI: %s", + item.Subnet, c.FilePath) + } + podCIDRs[netCIDR.String()] = netCIDR + } + } + } + } + + return podCIDRs, nil +} + +// GetPodCIDRsFromCNISpec gets pod CIDR allocated to the node from CNI spec file and returns it +func (c *cniNetworkConfig) GetPodCIDRsFromCNISpec() ([]*net.IPNet, error) { + podCIDRMap, err := c.getPodCIDRsMapFromCNISpec() + if err != nil { + return nil, err + } + podCIDRs := make([]*net.IPNet, 0) + for _, podCIDR := range podCIDRMap { + podCIDRs = append(podCIDRs, podCIDR) + } + return podCIDRs, nil +} + +// getBridgePlugin get the bridge plugin configuration out of the cniNetworkConfig in a consistent manner +func (c *cniNetworkConfig) getBridgePlugin() *Conf { + if c.ConfList != nil { + for _, conf := range c.ConfList.Plugins { + if conf.Type == "bridge" { + return conf + } + } + } + return c.Conf +} + +// InsertPodCIDRIntoIPAM insert a new cidr into the CNI file. If the CIDR already exists in the CNI ranges, then +// operation is a noop. Throws an error if either the passed cidr cannot be parsed or if there is a problem with the +// CIDRs already in the CNI config. +func (c *cniNetworkConfig) InsertPodCIDRIntoIPAM(cidr string) error { + ipamConfig := c.getBridgePlugin().IPAM + + // This should have already been sanitized by the GetPodCIDR* functions before it comes to us, but you can never be + // too safe... 
+ _, _, err := net.ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("unable to parse input cidr: %s - %v", cidr, err) + } + + // Check whether we already have the cidr in our list of ranges; if so, consider it a no-op + existingPodCIDRs, err := c.getPodCIDRsMapFromCNISpec() + if err != nil { + return err + } + if _, ok := existingPodCIDRs[cidr]; ok { + return nil + } + + // Add the CIDR that was passed to us + newRange := []*Range{{raw: make(map[string]json.RawMessage), Subnet: cidr}} + ipamConfig.Ranges = append(ipamConfig.Ranges, newRange) + + return nil +} + +func (c *cniNetworkConfig) SetMTU(mtu int) { + brPlugin := c.getBridgePlugin() + brPlugin.MTU = float64(mtu) +} + +func (c *cniNetworkConfig) WriteCNIConfig() error { + var cniBytes []byte + var err error + if c.IsConfList() { + cniBytes, err = json.Marshal(c.ConfList) + if err != nil { + return fmt.Errorf("unable to marshal CNI ConfList: %v", err) + } + } else { + cniBytes, err = json.Marshal(c.Conf) + if err != nil { + return fmt.Errorf("unable to marshal CNI Conf: %v", err) + } + } + + err = os.WriteFile(c.FilePath, cniBytes, 0644) + if err != nil { + return fmt.Errorf("failed to write into CNI conf file: %v", err) + } + + return nil +} + +// This elaborate re-definition of the stuff inside libcni is necessary because the upstream cni structs and funcs were +// only ever meant to unmarshal data. Since each struct only defines the needs of the specific plugins (like bridge or +// ipam), they often leave out the fields belonging to other plugins. This means when we go to marshal the data +// back into JSON we'll drop fields that are important to other plugins. +// +// So instead, we create a special set of utility structs and funcs that are capable of partially unmarshaling JSON +// data: parsing the information we care about, and leaving the data that we don't care about and may not even know +// about alone. They are then able to faithfully re-marshal the resulting structs back into their JSON form without +// losing data. +// +// This is very similar to the way that the PerimeterX/marshmallow (https://github.com/PerimeterX/marshmallow) library +// works, except that these functions are capable of marshaling the JSON back to its original form reliably. Whereas +// marshmallow is only able to unmarshal the data. + +// rawMapAble denotes an object that we are able to convert to a map of keys associated with +// raw JSON byte data +type rawMapAble interface { + getRaw() *map[string]json.RawMessage +} + +// All of our CNI-based structs are listed here. Each struct only has the fields that we use to specifically read or +// write data to / from. + +// ConfList represents a list of CNI configurations +type ConfList struct { + Plugins []*Conf + raw map[string]json.RawMessage +} + +// Conf represents the individual CNI configuration that may exist on its own, or be part of a ConfList +type Conf struct { + Bridge string + IPAM *IPAM + MTU float64 + Type string + raw map[string]json.RawMessage +} + +// IPAM represents the ipam specific configuration that may exist on a given CNI configuration / plugin +type IPAM struct { + Subnet string + Ranges [][]*Range + raw map[string]json.RawMessage +} + +// Range represents an IP range that may exist within a range set (hence the double array above) +type Range struct { + Subnet string + raw map[string]json.RawMessage +} + +// The following are the implementations of rawMapAble, json.Marshaler, & json.Unmarshaler for each of the above +// structs. 
Each struct requires the following methods in order to be marshaled / unmarshaled: +// * getRaw() *map[string]json.RawMessage +// * UnmarshalJSON(bytes []byte) error +// * MarshalJson() ([]bytes, error) + +func (c *ConfList) getRaw() *map[string]json.RawMessage { + return &c.raw +} + +func (c *ConfList) UnmarshalJSON(bytes []byte) error { + return PartialJSONUnmarshal(c, bytes) +} + +func (c *ConfList) MarshalJSON() ([]byte, error) { + return PartialJSONMarshal(c) +} + +func (c *Conf) getRaw() *map[string]json.RawMessage { + return &c.raw +} + +func (c *Conf) UnmarshalJSON(bytes []byte) error { + return PartialJSONUnmarshal(c, bytes) +} + +func (c *Conf) MarshalJSON() ([]byte, error) { + return PartialJSONMarshal(c) +} + +func (i *IPAM) getRaw() *map[string]json.RawMessage { + return &i.raw +} + +func (i *IPAM) UnmarshalJSON(bytes []byte) error { + return PartialJSONUnmarshal(i, bytes) +} + +func (i *IPAM) MarshalJSON() ([]byte, error) { + return PartialJSONMarshal(i) +} + +func (r *Range) getRaw() *map[string]json.RawMessage { + return &r.raw +} + +func (r *Range) UnmarshalJSON(bytes []byte) error { + return PartialJSONUnmarshal(r, bytes) +} + +func (r *Range) MarshalJSON() ([]byte, error) { + return PartialJSONMarshal(r) +} + +// PartialJSONUnmarshal allows a struct that implements the rawMapAble interface to be partially unmarshaled. This means +// that via this function we are able to parse and understand the fields that we know about and have defined in the +// struct without knowing every possible field. This still stores the unknown fields and they can be retrieved via the +// getRaw() function and restored properly via the PartialJSONMarshal() function. +func PartialJSONUnmarshal(r rawMapAble, bytes []byte) error { + // Unmarshal the full element into map[string]json.RawMessage so that we can ensure that we capture all elements + // and not just the ones that we have struct fields for + raw := r.getRaw() + if err := json.Unmarshal(bytes, raw); err != nil { + return err + } + + // Go through the struct that lies under the rawMapAble interface and loop through all of its fields + val := reflect.ValueOf(r).Elem() + for i := 0; i < val.NumField(); i++ { + // Get the name and value of the field for later use + name := strings.ToLower(val.Type().Field(i).Name) + valueField := val.Field(i) + if name == "raw" { + continue + } + + // If a name from the underlying struct exists in the raw message, then send it for more complete unmarshalling + if valFromRaw, ok := (*raw)[name]; ok { + if !valueField.CanAddr() { + // Make sure that the value is capable of addressing before we try it below and get a panic + continue + } + + // Unmarshal the raw JSON into the specific interface of the underlying struct, this second pass at + // unmarshalling is how we populate specific fields in our struct rather than just working with raw JSON + if err := json.Unmarshal(valFromRaw, valueField.Addr().Interface()); err != nil { + return err + } + } + } + + return nil +} + +// isNilOrEmpty Unfortunately, we cannot blindly call the IsNil() function on reflect.Value as there are many types like +// strings that are not able to have a nil value and it will cause a panic +func isNilOrEmpty(v reflect.Value) bool { + //nolint:exhaustive // we don't care about all of the potential types here, only the ones that might trip us up + switch v.Kind() { + case reflect.Ptr, reflect.Map, reflect.Array, reflect.Chan, reflect.Slice: + return v.IsNil() + case reflect.String: + return v.Interface() == "" + case reflect.Int, 
reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, + reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64: + return v.IsZero() + } + return false +} + +// PartialJSONMarshal allows a struct that implements the rawMapAble interface to be fully restored without having +// to know about every possible field that may exist within the JSON. This is the reverse process of +// PartialJSONUnmarshal(). +func PartialJSONMarshal(r rawMapAble) ([]byte, error) { + raw := r.getRaw() + + // Find the value of our RawAble struct passed in + val := reflect.ValueOf(r).Elem() + // Iterate over all the fields in the passed struct + for i := 0; i < val.NumField(); i++ { + name := strings.ToLower(val.Type().Field(i).Name) + valueField := val.Field(i) + if name == "raw" { + // Don't attempt to marshal our raw field as that's where we are marshaling to + continue + } + if !valueField.CanAddr() { + // Make sure that the value is capable of addressing before we try it below and get a panic + continue + } + if isNilOrEmpty(valueField) { + // Don't load up the marshaled JSON with a bunch of null values + continue + } + + // We are now reasonably certain that we have a field on the passed struct that is: + // * not our raw field + // * can be addressed + // * is not nil + // Let's marshal it! + bytes, err := json.Marshal(valueField.Addr().Interface()) + if err != nil { + return nil, err + } + + // Take the marshaled value and store it in the raw map alongside other keys that we don't care about and were + // never unmarshalled + (*raw)[name] = bytes + } + + // Finally marshal our raw map which contains both parsed and unparsed fields + return json.Marshal(raw) +} diff --git a/pkg/utils/cni_test.go b/pkg/utils/cni_test.go new file mode 100644 index 00000000..70a5b1d7 --- /dev/null +++ b/pkg/utils/cni_test.go @@ -0,0 +1,771 @@ +package utils + +import ( + "encoding/json" + "fmt" + "os" + "path" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func getConfList() []byte { + return []byte(` +{ + "cniVersion":"0.3.0", + "name":"mynet", + "plugins":[ + { + "bridge":"kube-bridge", + "ipam":{ + "subnet":"10.242.0.0/24", + "type":"host-local" + }, + "isDefaultGateway":true, + "mtu":9001, + "name":"kubernetes", + "type":"bridge" + } + ] +} +`) +} + +func getConfListWithRanges() []byte { + return []byte(` +{ + "cniVersion":"0.3.0", + "name":"mynet", + "plugins":[ + { + "bridge":"kube-bridge", + "ipam":{ + "ranges": [ + [ + { + "subnet":"10.242.0.0/24" + }, + { + "subnet":"10.242.1.0/24" + } + ], + [ + { + "subnet":"10.242.2.0/24" + }, + { + "subnet":"10.242.3.0/24" + } + ] + ], + "subnet": "10.242.4.0/24", + "type":"host-local" + }, + "isDefaultGateway":true, + "mtu":9001, + "name":"kubernetes", + "type":"bridge" + } + ] +} +`) +} + +func getConfListWithDuplicateRanges() []byte { + return []byte(` +{ + "cniVersion":"0.3.0", + "name":"mynet", + "plugins":[ + { + "bridge":"kube-bridge", + "ipam":{ + "ranges": [ + [ + { + "subnet":"10.242.0.0/24" + }, + { + "subnet":"10.242.1.0/24" + } + ], + [ + { + "subnet":"10.242.2.0/24" + }, + { + "subnet":"10.242.3.0/24" + } + ] + ], + "subnet": "10.242.0.0/24", + "type":"host-local" + }, + "isDefaultGateway":true, + "mtu":9001, + "name":"kubernetes", + "type":"bridge" + } + ] +} +`) +} + +func getConfListWithIPv6DuplicateRanges() []byte { + return []byte(` +{ + "cniVersion":"0.3.0", + "name":"mynet", + "plugins":[ + { + "bridge":"kube-bridge", + "ipam":{ + "ranges": [ + [ + { + "subnet":"10.242.0.0/24" + }, 
+ { + "subnet":"10.242.1.0/24" + } + ], + [ + { + "subnet":"10.242.2.0/24" + }, + { + "subnet":"2001:db8:42:2::/64" + } + ] + ], + "subnet": "2001:db8:42:2::/64", + "type":"host-local" + }, + "isDefaultGateway":true, + "mtu":9001, + "name":"kubernetes", + "type":"bridge" + } + ] +} +`) +} + +func getConfListWithNoSubnet() []byte { + return []byte(` +{ + "cniVersion":"0.3.0", + "name":"mynet", + "plugins":[ + { + "bridge":"kube-bridge", + "ipam":{ + "type":"host-local" + }, + "isDefaultGateway":true, + "name":"kubernetes", + "type":"bridge" + } + ] +} +`) +} + +func getConfListWithNoPlugins() []byte { + return []byte(` +{ + "cniVersion":"0.3.0", + "name":"mynet" +} +`) +} + +func getConf() []byte { + return []byte(` +{ + "cniVersion":"0.3.0", + "name":"mynet", + "bridge":"kube-bridge", + "ipam":{ + "type":"host-local", + "subnet": "10.242.0.0/24" + }, + "isDefaultGateway":true, + "name":"kubernetes", + "type":"bridge" +} +`) +} + +func getConfWithNoSubnet() []byte { + return []byte(` +{ + "cniVersion":"0.3.0", + "name":"mynet", + "bridge":"kube-bridge", + "ipam":{ + "type":"host-local" + }, + "isDefaultGateway":true, + "name":"kubernetes", + "type":"bridge" +} +`) +} + +func getConfWithNoType() []byte { + return []byte(` +{ + "cniVersion":"0.3.0", + "name":"mynet", + "bridge":"kube-bridge", + "ipam":{ + "type":"host-local" + }, + "isDefaultGateway":true, + "name":"kubernetes" +} +`) +} + +func TestMarshalUnmarshalRestoration(t *testing.T) { + t.Run("Ensure ConfList is parsed and unparsed properly", func(t *testing.T) { + before := getConfList() + cl := new(ConfList) + + err := json.Unmarshal(before, cl) + if err != nil { + t.Fatalf("wasn't able to unmarshal JSON in test: %s", before) + } + + after, err := json.MarshalIndent(cl, "", " ") + if err != nil { + t.Fatalf("wasn't able to marshal JSON in test: %s", before) + } + + assert.JSONEqf(t, string(before), string(after), + "JSON is not equal!\nBefore:\n%s\nAfter:\n%s\n", before, after) + }) + t.Run("Ensure ConfListWithRange is parsed and unparsed properly", func(t *testing.T) { + before := getConfListWithRanges() + cl := new(ConfList) + + err := json.Unmarshal(before, cl) + if err != nil { + t.Fatalf("wasn't able to unmarshal JSON in test: %s", before) + } + + after, err := json.MarshalIndent(cl, "", " ") + if err != nil { + t.Fatalf("wasn't able to marshal JSON in test: %s", before) + } + + assert.JSONEqf(t, string(before), string(after), + "JSON is not equal!\nBefore:\n%s\nAfter:\n%s\n", before, after) + }) + t.Run("Ensure ConfListWithNoSubnet is parsed and unparsed properly", func(t *testing.T) { + before := getConfListWithNoSubnet() + cl := new(ConfList) + + err := json.Unmarshal(before, cl) + if err != nil { + t.Fatalf("wasn't able to unmarshal JSON in test: %s", before) + } + + after, err := json.MarshalIndent(cl, "", " ") + if err != nil { + t.Fatalf("wasn't able to marshal JSON in test: %s", before) + } + + assert.JSONEqf(t, string(before), string(after), + "JSON is not equal!\nBefore:\n%s\nAfter:\n%s\n", before, after) + }) + t.Run("Ensure ConfWithNoSubnet is parsed and unparsed properly", func(t *testing.T) { + before := getConfWithNoSubnet() + c := new(Conf) + + err := json.Unmarshal(before, c) + if err != nil { + t.Fatalf("wasn't able to unmarshal JSON in test: %s", before) + } + + after, err := json.MarshalIndent(c, "", " ") + if err != nil { + t.Fatalf("wasn't able to marshal JSON in test: %s", before) + } + + assert.JSONEqf(t, string(before), string(after), + "JSON is not equal!\nBefore:\n%s\nAfter:\n%s\n", before, after) + 
}) +} + +func TestNewCNINetworkConfig(t *testing.T) { + testcases := []struct { + name string + filename string + isConfList bool + content []byte + err error + ranges []string + }{ + { + name: "Attempt reading from conf", + filename: "10-kuberouter.conf", + isConfList: false, + content: getConf(), + err: nil, + }, + { + name: "Attempt reading from conflist", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfList(), + err: nil, + }, + { + name: "Ensure error upon reading from conf with no type", + filename: "10-kuberouter.conf", + isConfList: false, + content: getConfWithNoType(), + err: fmt.Errorf("error load CNI config, file appears to have no type: "), + }, + { + name: "Ensure error upon reading from conflist with no plugins", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfListWithNoPlugins(), + err: fmt.Errorf("CNI config list "), + }, + { + name: "Ensure conf subnet get consolidated into ranges when only subnet exists", + filename: "10-kuberouter.conf", + isConfList: false, + content: getConf(), + err: nil, + ranges: []string{"10.242.0.0/24"}, + }, + { + name: "Ensure conflist subnet get consolidated into ranges when only subnet exists", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfList(), + err: nil, + ranges: []string{"10.242.0.0/24"}, + }, + { + name: "Ensure conflist subnets get consolidated with ranges when both exist", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfListWithRanges(), + err: nil, + ranges: []string{"10.242.0.0/24", "10.242.1.0/24", "10.242.2.0/24", "10.242.3.0/24", "10.242.4.0/24"}, + }, + { + name: "Ensure conflist subnets get de-deduplicated with ranges when repeats exist", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfListWithDuplicateRanges(), + err: nil, + ranges: []string{"10.242.0.0/24", "10.242.1.0/24", "10.242.2.0/24", "10.242.3.0/24"}, + }, + } + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + file, tmpDir, err := createFile(testcase.content, testcase.filename) + if err != nil { + t.Fatalf("Failed to create temporary CNI config file: %v", err) + } + defer os.RemoveAll(tmpDir) + + cni, err := NewCNINetworkConfig(file.Name()) + if err != nil { + if testcase.err == nil { + assert.Fail(t, "if error from NewCNINetworkConfig is not nil, the testcase shouldn't be "+ + "nil either") + } + assert.True(t, strings.HasPrefix(err.Error(), testcase.err.Error())) + if err != nil { + return + } + } + + assert.Equal(t, testcase.isConfList, cni.IsConfList()) + + if testcase.ranges != nil { + assert.Emptyf(t, cni.getBridgePlugin().IPAM.Subnet, + "subnet of cniNetworkConfig should always be empty because it should be consolidated with "+ + "ranges upon creation") + + foundSubnets := make(map[string]interface{}, 0) + for _, rangeSet := range cni.getBridgePlugin().IPAM.Ranges { + for _, rangeSubnet := range rangeSet { + foundSubnets[rangeSubnet.Subnet] = struct{}{} + } + } + + assert.Len(t, foundSubnets, len(testcase.ranges)) + + for _, subnet := range testcase.ranges { + _, found := foundSubnets[subnet] + assert.Truef(t, found, "subnet %s from testcase should have been found in the ranges inside "+ + "cniNetworkConfig", subnet) + } + } + }) + } +} + +func TestCniNetworkConfig_GetPodCIDRsFromCNISpec(t *testing.T) { + testcases := []struct { + name string + filename string + isConfList bool + content []byte + err error + ranges []string + }{ + { + name: "Ensure conf subnet get consolidated into 
ranges when only subnet exists", + filename: "10-kuberouter.conf", + isConfList: false, + content: getConf(), + err: nil, + ranges: []string{"10.242.0.0/24"}, + }, + { + name: "Ensure conflist subnet get consolidated into ranges when only subnet exists", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfList(), + err: nil, + ranges: []string{"10.242.0.0/24"}, + }, + { + name: "Ensure conflist subnets get consolidated with ranges when both exist", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfListWithRanges(), + err: nil, + ranges: []string{"10.242.0.0/24", "10.242.1.0/24", "10.242.2.0/24", "10.242.3.0/24", "10.242.4.0/24"}, + }, + { + name: "Ensure conflist subnets get de-deduplicated with ranges when repeats exist", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfListWithDuplicateRanges(), + err: nil, + ranges: []string{"10.242.0.0/24", "10.242.1.0/24", "10.242.2.0/24", "10.242.3.0/24"}, + }, + { + name: "Ensure conflist subnets get de-deduplicated with ranges when repeats exist", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfListWithIPv6DuplicateRanges(), + err: nil, + ranges: []string{"10.242.0.0/24", "10.242.1.0/24", "10.242.2.0/24", "2001:db8:42:2::/64"}, + }, + } + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + file, tmpDir, err := createFile(testcase.content, testcase.filename) + if err != nil { + t.Fatalf("Failed to create temporary CNI config file: %v", err) + } + defer os.RemoveAll(tmpDir) + + cni, err := NewCNINetworkConfig(file.Name()) + assert.Equal(t, testcase.err, err) + if err != nil { + return + } + + assert.Equal(t, testcase.isConfList, cni.IsConfList()) + + if testcase.ranges != nil { + assert.Emptyf(t, cni.getBridgePlugin().IPAM.Subnet, + "subnet of cniNetworkConfig should always be empty because it should be consolidated with "+ + "ranges upon creation") + + foundSubnets, err := cni.GetPodCIDRsFromCNISpec() + + assert.Nil(t, err, "err should be nil at this point") + + assert.Len(t, foundSubnets, len(testcase.ranges)) + + for _, subnet := range testcase.ranges { + found := false + for _, foundSubnet := range foundSubnets { + if subnet == foundSubnet.String() { + found = true + } + } + assert.Truef(t, found, "subnet %s from testcase should have been found in the ranges inside "+ + "cniNetworkConfig", subnet) + } + } + }) + } +} + +func TestCniNetworkConfig_InsertPodCIDRIntoIPAM(t *testing.T) { + testcases := []struct { + name string + filename string + isConfList bool + content []byte + err error + ranges []string + insertRanges []string + }{ + { + name: "Ensure passed CIDR is properly inserted into a CNI conf with no subnets defined", + filename: "10-kuberouter.conf", + isConfList: false, + content: getConfWithNoSubnet(), + err: nil, + ranges: []string{"10.242.0.0/24"}, + insertRanges: []string{"10.242.0.0/24"}, + }, + { + name: "Ensure multiple CIDRs are properly inserted into a CNI conf with no subnets defined", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfListWithNoSubnet(), + err: nil, + ranges: []string{"10.242.0.0/24", "10.242.1.0/24"}, + insertRanges: []string{"10.242.0.0/24", "10.242.1.0/24"}, + }, + { + name: "Ensure multiple IPv4 & IPv6 CIDRs are properly inserted into a CNI conf with no subnets" + + "defined", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfListWithNoSubnet(), + err: nil, + ranges: []string{"10.242.0.0/24", "2001:db8:42:2::/64"}, + 
insertRanges: []string{"10.242.0.0/24", "2001:db8:42:2::/64"}, + }, + { + name: "Ensure that new subnets are inserted into a conflist with existing ranges", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfListWithRanges(), + err: nil, + ranges: []string{"10.242.0.0/24", "10.242.1.0/24", "10.242.2.0/24", "10.242.3.0/24", "10.242.4.0/24", + "10.242.5.0/24", "10.242.6.0/24"}, + insertRanges: []string{"10.242.5.0/24", "10.242.6.0/24"}, + }, + { + name: "Ensure duplicates are not inserted without error", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfListWithDuplicateRanges(), + err: nil, + ranges: []string{"10.242.0.0/24", "10.242.1.0/24", "10.242.2.0/24", "10.242.3.0/24", "10.242.4.0/24"}, + insertRanges: []string{"10.242.4.0/24"}, + }, + { + name: "Ensure error is thrown for bad cidr", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfListWithDuplicateRanges(), + err: fmt.Errorf("unable to parse input cidr: %s - %s", "10.242.4.0", + "invalid CIDR address: 10.242.4.0"), + ranges: []string{"10.242.0.0/24", "10.242.1.0/24", "10.242.2.0/24", "10.242.3.0/24"}, + insertRanges: []string{"10.242.4.0"}, + }, + } + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + file, tmpDir, err := createFile(testcase.content, testcase.filename) + if err != nil { + t.Fatalf("Failed to create temporary CNI config file: %v", err) + } + defer os.RemoveAll(tmpDir) + + cni, err := NewCNINetworkConfig(file.Name()) + if err != nil { + assert.Fail(t, "err should always be nil when calling NewCNINetworkConfig for this suite") + } + + for _, cidr := range testcase.insertRanges { + err = cni.InsertPodCIDRIntoIPAM(cidr) + assert.Equal(t, testcase.err, err) + } + + expectedSubnets := make([]string, 0) + netSubnets, _ := cni.GetPodCIDRsFromCNISpec() + for _, netSubnet := range netSubnets { + expectedSubnets = append(expectedSubnets, netSubnet.String()) + } + assert.ElementsMatch(t, testcase.ranges, expectedSubnets) + }) + } +} + +func TestCniNetworkConfig_WriteCNIConfig(t *testing.T) { + testcases := []struct { + name string + filename string + isConfList bool + content []byte + err error + ranges []string + insertRanges []string + }{ + { + name: "Ensure written file is the same as read file when no ranges were inserted", + filename: "10-kuberouter.conf", + isConfList: false, + content: getConfWithNoSubnet(), + err: nil, + }, + { + name: "Ensure written conf file contains single subnet", + filename: "10-kuberouter.conf", + isConfList: false, + content: getConf(), + err: nil, + ranges: []string{"10.242.0.0/24"}, + insertRanges: []string{"10.242.0.0/24"}, + }, + { + name: "Ensure written conflist file contains multiple subnets", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfListWithNoSubnet(), + err: nil, + ranges: []string{"10.242.0.0/24", "10.242.1.0/24"}, + insertRanges: []string{"10.242.0.0/24", "10.242.1.0/24"}, + }, + { + name: "Ensure written conflist file has IPv4 & IPv6 CIDRs properly inserted", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfListWithNoSubnet(), + err: nil, + ranges: []string{"10.242.0.0/24", "2001:db8:42:2::/64"}, + insertRanges: []string{"10.242.0.0/24", "2001:db8:42:2::/64"}, + }, + { + name: "Ensure that conflist file has multiple subnets written when ranges already exist", + filename: "10-kuberouter.conflist", + isConfList: true, + content: getConfListWithRanges(), + err: nil, + ranges: []string{"10.242.0.0/24", "10.242.1.0/24", 
"10.242.2.0/24", "10.242.3.0/24", "10.242.4.0/24", + "10.242.5.0/24", "10.242.6.0/24"}, + insertRanges: []string{"10.242.5.0/24", "10.242.6.0/24"}, + }, + } + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + file, tmpDir, err := createFile(testcase.content, testcase.filename) + if err != nil { + t.Fatalf("Failed to create temporary CNI config file: %v", err) + } + defer os.RemoveAll(tmpDir) + + cni, err := NewCNINetworkConfig(file.Name()) + if err != nil { + assert.Fail(t, "err should always be nil when calling NewCNINetworkConfig for this suite") + } + + if testcase.insertRanges != nil { + for _, cidr := range testcase.insertRanges { + err = cni.InsertPodCIDRIntoIPAM(cidr) + assert.Equal(t, testcase.err, err) + } + } + + err = cni.WriteCNIConfig() + if err != nil { + t.Fatalf("Failed to marshal or write CNI file: %v", err) + } + + // Read the CNI directly to ensure that subnet is really removed (which wouldn't be detected upon + // re-initialization of NewCNINetworkConfig below because of how it treats subnets + cniFileBytes, err := os.ReadFile(file.Name()) + if err != nil { + t.Fatalf("we should be able to read the CNI file we just wrote to") + } + var brPlug *Conf + if cni.IsConfList() { + cl := new(ConfList) + err = json.Unmarshal(cniFileBytes, cl) + if err != nil { + t.Fatalf("wasn't able to unmarshal JSON in test: %s", cniFileBytes) + } + for _, plug := range cl.Plugins { + if plug.Type == "bridge" { + brPlug = plug + } + } + } else { + cl := new(Conf) + err = json.Unmarshal(cniFileBytes, cl) + if err != nil { + t.Fatalf("wasn't able to unmarshal JSON in test: %s", cniFileBytes) + } + brPlug = cl + } + + if brPlug == nil { + t.Fatalf("bridge plugin should be populated by all unit tests") + } + assert.Emptyf(t, brPlug.IPAM.Subnet, "upon calling WriteCNIConfig() subnet should ALWAYS be blank "+ + "because it should have been consolidated with ranges") + + cni, err = NewCNINetworkConfig(file.Name()) + if err != nil { + assert.Fail(t, "err should always be nil when calling NewCNINetworkConfig for this suite") + } + + if testcase.ranges != nil { + assert.Emptyf(t, cni.getBridgePlugin().IPAM.Subnet, + "subnet of cniNetworkConfig should always be empty because it should be consolidated with "+ + "ranges upon creation") + + foundSubnets := make(map[string]interface{}, 0) + for _, rangeSet := range cni.getBridgePlugin().IPAM.Ranges { + for _, rangeSubnet := range rangeSet { + foundSubnets[rangeSubnet.Subnet] = struct{}{} + } + } + + assert.Len(t, foundSubnets, len(testcase.ranges)) + + for _, subnet := range testcase.ranges { + _, found := foundSubnets[subnet] + assert.Truef(t, found, "subnet %s from testcase should have been found in the ranges inside "+ + "cniNetworkConfig", subnet) + } + } else { + assert.Emptyf(t, cni.getBridgePlugin().IPAM.Ranges, + "testcase ranges was nil, the subnets re-read from the CNI file after writing should have "+ + "been empty also") + } + }) + } +} + +func createFile(content []byte, filename string) (*os.File, string, error) { + dir, err := os.MkdirTemp("", "kube-router-cni-test") + if err != nil { + return nil, "", fmt.Errorf("cannot create tmpdir: %v", err) + } + fullPath := path.Join(dir, filename) + file, err := os.Create(fullPath) + if err != nil { + return nil, "", fmt.Errorf("cannot create file: %v", err) + } + + if _, err = file.Write(content); err != nil { + return nil, "", fmt.Errorf("cannot write to file: %v", err) + } + + fmt.Println("File is ", file.Name()) + return file, dir, nil +} diff --git 
a/pkg/utils/pod_cidr.go b/pkg/utils/pod_cidr.go index fe26cba4..edc7e20b 100644 --- a/pkg/utils/pod_cidr.go +++ b/pkg/utils/pod_cidr.go @@ -1,136 +1,23 @@ package utils import ( - "encoding/json" "fmt" "net" - "os" "strings" - "github.com/containernetworking/cni/libcni" - "github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator" v1core "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" netutils "k8s.io/utils/net" ) const ( - podCIDRAnnotation = "kube-router.io/pod-cidr" + podCIDRAnnotation = "kube-router.io/pod-cidr" + podCIDRsAnnotation = "kube-router.io/pod-cidrs" ) -// GetPodCidrFromCniSpec gets pod CIDR allocated to the node from CNI spec file and returns it -func GetPodCidrFromCniSpec(cniConfFilePath string) (net.IPNet, error) { - var podCidr = net.IPNet{} - var err error - var ipamConfig *allocator.IPAMConfig - - if strings.HasSuffix(cniConfFilePath, ".conflist") { - var confList *libcni.NetworkConfigList - confList, err = libcni.ConfListFromFile(cniConfFilePath) - if err != nil { - return net.IPNet{}, fmt.Errorf("failed to load CNI config list file: %s", err.Error()) - } - for _, conf := range confList.Plugins { - if conf.Network.IPAM.Type != "" { - ipamConfig, _, err = allocator.LoadIPAMConfig(conf.Bytes, "") - if err != nil { - if err.Error() != "no IP ranges specified" { - return net.IPNet{}, fmt.Errorf("failed to get IPAM details from the CNI conf file: %s", err.Error()) - } - } - break - } - } - } else { - netconfig, err := libcni.ConfFromFile(cniConfFilePath) - if err != nil { - return net.IPNet{}, fmt.Errorf("failed to load CNI conf file: %s", err.Error()) - } - ipamConfig, _, err = allocator.LoadIPAMConfig(netconfig.Bytes, "") - if err != nil { - // TODO: Handle this error properly in controllers, if no subnet is specified - if err.Error() != "no IP ranges specified" { - return net.IPNet{}, fmt.Errorf("failed to get IPAM details from the CNI conf file: %s", err.Error()) - } - return net.IPNet{}, nil - } - } - // TODO: Support multiple subnet definitions in CNI conf - if ipamConfig != nil && len(ipamConfig.Ranges) > 0 { - for _, rangeset := range ipamConfig.Ranges { - for _, item := range rangeset { - if item.Subnet.IP != nil { - podCidr = net.IPNet(item.Subnet) - break - } - } - } - } - return podCidr, nil -} - -// InsertPodCidrInCniSpec inserts the pod CIDR allocated to the node by kubernetes controller manager -// and stored it in the CNI specification -func InsertPodCidrInCniSpec(cniConfFilePath string, cidr string) error { - file, err := os.ReadFile(cniConfFilePath) - if err != nil { - return fmt.Errorf("failed to load CNI conf file: %s", err.Error()) - } - var config interface{} - if strings.HasSuffix(cniConfFilePath, ".conflist") { - err = json.Unmarshal(file, &config) - if err != nil { - return fmt.Errorf("failed to parse JSON from CNI conf file: %s", err.Error()) - } - updatedCidr := false - configMap := config.(map[string]interface{}) - for key := range configMap { - if key != "plugins" { - continue - } - // .conflist file has array of plug-in config. 
Find the one with ipam key - // and insert the CIDR for the node - pluginConfigs := configMap["plugins"].([]interface{}) - for _, pluginConfig := range pluginConfigs { - pluginConfigMap := pluginConfig.(map[string]interface{}) - if val, ok := pluginConfigMap["ipam"]; ok { - valObj := val.(map[string]interface{}) - valObj["subnet"] = cidr - updatedCidr = true - break - } - } - } - - if !updatedCidr { - return fmt.Errorf("failed to insert subnet cidr into CNI conf file: %s as CNI file is invalid", cniConfFilePath) - } - - } else { - err = json.Unmarshal(file, &config) - if err != nil { - return fmt.Errorf("failed to parse JSON from CNI conf file: %s", err.Error()) - } - pluginConfig := config.(map[string]interface{}) - pluginConfig["ipam"].(map[string]interface{})["subnet"] = cidr - } - configJSON, _ := json.Marshal(config) - err = os.WriteFile(cniConfFilePath, configJSON, 0644) - if err != nil { - return fmt.Errorf("failed to insert subnet cidr into CNI conf file: %s", err.Error()) - } - return nil -} - // GetPodCidrFromNodeSpec reads the pod CIDR allocated to the node from API node object and returns it -func GetPodCidrFromNodeSpec(clientset kubernetes.Interface, hostnameOverride string) (string, error) { - node, err := GetNodeObject(clientset, hostnameOverride) - if err != nil { - return "", fmt.Errorf("Failed to get pod CIDR allocated for the node due to: " + err.Error()) - } - +func GetPodCidrFromNodeSpec(node *v1core.Node) (string, error) { if cidr, ok := node.Annotations[podCIDRAnnotation]; ok { - _, _, err = net.ParseCIDR(cidr) + _, _, err := net.ParseCIDR(cidr) if err != nil { return "", fmt.Errorf("error parsing pod CIDR in node annotation: %v", err) } @@ -145,35 +32,38 @@ func GetPodCidrFromNodeSpec(clientset kubernetes.Interface, hostnameOverride str return node.Spec.PodCIDR, nil } -// GetPodCidrsFromNodeSpecDualStack reads the IPv4 and IPv6 pod CIDR allocated +// GetPodCIDRsFromNodeSpecDualStack reads the IPv4 and IPv6 pod CIDR allocated // to the node from API node object and returns them -func GetPodCidrsFromNodeSpecDualStack(node *v1core.Node) (string, string, error) { - var podCidrv4, podCidrv6 string +func GetPodCIDRsFromNodeSpecDualStack(node *v1core.Node) ([]string, []string, error) { + var podIPv4CIDRs, podIPv6CIDRs []string - if cidrs, ok := node.Annotations[podCIDRAnnotation]; ok { - for _, cidr := range strings.Split(cidrs, ",") { - if podCidrv4 == "" && netutils.IsIPv4CIDRString(cidr) { - podCidrv4 = cidr + if podCIDRs, ok := node.Annotations[podCIDRsAnnotation]; ok { + for _, cidr := range strings.Split(podCIDRs, ",") { + if _, _, err := net.ParseCIDR(cidr); err != nil { + return podIPv4CIDRs, podIPv6CIDRs, fmt.Errorf("error parsing pod CIDR in node annotation: %v", err) } - if podCidrv6 == "" && netutils.IsIPv6CIDRString(cidr) { - podCidrv6 = cidr + if netutils.IsIPv4CIDRString(cidr) { + podIPv4CIDRs = append(podIPv4CIDRs, cidr) + } + if netutils.IsIPv6CIDRString(cidr) { + podIPv6CIDRs = append(podIPv6CIDRs, cidr) } } - return podCidrv4, podCidrv6, nil + return podIPv4CIDRs, podIPv6CIDRs, nil } if len(node.Spec.PodCIDRs) == 0 { - return "", "", fmt.Errorf("node.Spec.PodCIDRs empty for node: %v", node.Name) + return nil, nil, fmt.Errorf("node.Spec.PodCIDRs empty for node: %v", node.Name) } - for _, cidr := range node.Spec.PodCIDRs { - if podCidrv4 == "" && netutils.IsIPv4CIDRString(cidr) { - podCidrv4 = cidr + for _, podCIDR := range node.Spec.PodCIDRs { + if netutils.IsIPv4CIDRString(podCIDR) { + podIPv4CIDRs = append(podIPv4CIDRs, podCIDR) } - if podCidrv6 == "" 
&& netutils.IsIPv6CIDRString(cidr) { - podCidrv6 = cidr + if netutils.IsIPv6CIDRString(podCIDR) { + podIPv6CIDRs = append(podIPv6CIDRs, podCIDR) } } - return podCidrv4, podCidrv6, nil + return podIPv4CIDRs, podIPv6CIDRs, nil } diff --git a/pkg/utils/pod_cidr_test.go b/pkg/utils/pod_cidr_test.go index 5968e1d1..ac678fe3 100644 --- a/pkg/utils/pod_cidr_test.go +++ b/pkg/utils/pod_cidr_test.go @@ -4,8 +4,6 @@ import ( "context" "errors" "fmt" - "net" - "os" "reflect" "testing" @@ -14,114 +12,6 @@ import ( "k8s.io/client-go/kubernetes/fake" ) -func Test_GetPodCidrFromCniSpec(t *testing.T) { - testcases := []struct { - name string - cniConfFile string - podCidr net.IPNet - err error - filename string - }{ - { - "CNI config file has subnet", - `{"bridge":"kube-bridge","ipam":{"subnet":"172.17.0.0/24","type":"host-local"},"isDefaultGateway":true,"name":"kubernetes","type":"bridge"}`, - net.IPNet{ - IP: net.IP{172, 17, 0, 0}, - Mask: net.CIDRMask(24, 32), - }, - nil, - "10-kuberouter.conf", - }, - { - "CNI config file missing subnet", - `{"bridge":"kube-bridge","ipam":{"type":"host-local"},"isDefaultGateway":true,"name":"kubernetes","type":"bridge"}`, - net.IPNet{}, - nil, - "10-kuberouter.conf", - }, - } - - for _, testcase := range testcases { - t.Run(testcase.name, func(t *testing.T) { - file, err := createFile(testcase.cniConfFile, testcase.filename) - if err != nil { - t.Fatalf("Failed to create temporary CNI config file: %v", err) - } - defer os.Remove(file.Name()) - - cidr, err := GetPodCidrFromCniSpec(file.Name()) - if !reflect.DeepEqual(err, testcase.err) { - t.Logf("actual error: %v", err) - t.Logf("expected error: %v", testcase.err) - t.Error("did not get expected error") - } - - if !reflect.DeepEqual(cidr, testcase.podCidr) { - t.Logf("actual pod cidr: %v", cidr) - t.Logf("expected pod cidr: %v", testcase.podCidr) - t.Error("did not get expected pod cidr") - } - }) - } -} - -func Test_InsertPodCidrInCniSpec(t *testing.T) { - testcases := []struct { - name string - podCidr string - existingCni string - newCni string - err error - filename string - }{ - { - "insert cidr to cni config", - "172.17.0.0/24", - `{"bridge":"kube-bridge","ipam":{"type":"host-local"},"isDefaultGateway":true,"name":"kubernetes","type":"bridge"}`, - `{"bridge":"kube-bridge","ipam":{"subnet":"172.17.0.0/24","type":"host-local"},"isDefaultGateway":true,"name":"kubernetes","type":"bridge"}`, - nil, - "/tmp/10-kuberouter.conf", - }, - { - "insert cidr to cni config", - "172.17.0.0/24", - `{"cniVersion":"0.3.0","name":"mynet","plugins":[{"bridge":"kube-bridge","ipam":{"type":"host-local"},"isDefaultGateway":true,"name":"kubernetes","type":"bridge"},{"type":"portmap"}]}`, - `{"cniVersion":"0.3.0","name":"mynet","plugins":[{"bridge":"kube-bridge","ipam":{"subnet":"172.17.0.0/24","type":"host-local"},"isDefaultGateway":true,"name":"kubernetes","type":"bridge"},{"type":"portmap"}]}`, - nil, - "/tmp/10-kuberouter.conflist", - }, - } - - for _, testcase := range testcases { - t.Run(testcase.name, func(t *testing.T) { - cniConfigFile, err := createFile(testcase.existingCni, testcase.filename) - - if err != nil { - t.Fatalf("failed to create temporary CNI config: %v", err) - } - defer os.Remove(cniConfigFile.Name()) - - err = InsertPodCidrInCniSpec(cniConfigFile.Name(), testcase.podCidr) - if !reflect.DeepEqual(err, testcase.err) { - t.Logf("actual error: %v", err) - t.Logf("expected error: %v", err) - t.Error("did not get expected error") - } - - newContent, err := readFile(cniConfigFile.Name()) - if err != nil { - 
t.Fatalf("failed to read CNI config file: %v", err) - } - - if newContent != testcase.newCni { - t.Logf("actual CNI config: %v", newContent) - t.Logf("expected CNI config: %v", testcase.newCni) - t.Error("did not get expected CNI config content") - } - }) - } -} - func Test_GetPodCidrFromNodeSpec(t *testing.T) { testcases := []struct { name string @@ -181,8 +71,11 @@ func Test_GetPodCidrFromNodeSpec(t *testing.T) { if err != nil { t.Fatalf("failed to create existing nodes for test: %v", err) } - - podCIDR, err := GetPodCidrFromNodeSpec(clientset, testcase.hostnameOverride) + node, err := GetNodeObject(clientset, testcase.hostnameOverride) + if err != nil { + t.Error("unable to get node object from API: " + err.Error()) + } + podCIDR, err := GetPodCidrFromNodeSpec(node) if !reflect.DeepEqual(err, testcase.err) { t.Logf("actual error: %v", err) t.Logf("expected error: %v", testcase.err) @@ -198,21 +91,148 @@ func Test_GetPodCidrFromNodeSpec(t *testing.T) { } } -func createFile(content, filename string) (*os.File, error) { - file, err := os.Create(filename) - if err != nil { - return nil, fmt.Errorf("cannot create file: %v", err) +func Test_GetPodCIDRsFromNodeSpec(t *testing.T) { + var blankCIDR []string + testcases := []struct { + name string + hostnameOverride string + existingNode *apiv1.Node + ipv4CIDRs []string + ipv6CIDRs []string + err error + }{ + { + "node with node.Spec.PodCIDRs", + "test-node", + &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + }, + Spec: apiv1.NodeSpec{ + PodCIDRs: []string{ + "172.17.0.0/24", + "2001:db8:42:0::/64", + }, + }, + }, + []string{"172.17.0.0/24"}, + []string{"2001:db8:42:0::/64"}, + nil, + }, + { + "node with multiple IPv4 addresses and no IPv6 in node.Spec.PodCIDRs", + "test-node", + &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + }, + Spec: apiv1.NodeSpec{ + PodCIDRs: []string{ + "172.17.0.0/24", + "172.18.0.0/24", + }, + }, + }, + []string{"172.17.0.0/24", "172.18.0.0/24"}, + blankCIDR, + nil, + }, + { + "node with multiple IPv6 addresses in node.Spec.PodCIDRs", + "test-node", + &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + }, + Spec: apiv1.NodeSpec{ + PodCIDRs: []string{ + "172.17.0.0/24", + "2001:db8:42:0::/64", + "2001:db8:42:1::/64", + }, + }, + }, + []string{"172.17.0.0/24"}, + []string{"2001:db8:42:0::/64", "2001:db8:42:1::/64"}, + nil, + }, + { + "node with node.Annotations['kube-router.io/pod-cidrs']", + "test-node", + &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + Annotations: map[string]string{ + podCIDRsAnnotation: "172.17.0.0/24,2001:db8:42:2::/64", + }, + }, + }, + []string{"172.17.0.0/24"}, + []string{"2001:db8:42:2::/64"}, + nil, + }, + { + "node with invalid pod IPv4 cidrs in node.Annotations['kube-router.io/pod-cidrs']", + "test-node", + &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + Annotations: map[string]string{ + podCIDRsAnnotation: "172.17.0.0,2001:db8:42:2::/64", + }, + }, + }, + blankCIDR, + blankCIDR, + errors.New("error parsing pod CIDR in node annotation: invalid CIDR address: 172.17.0.0"), + }, + { + "node with invalid pod IPv6 cidrs in node.Annotations['kube-router.io/pod-cidrs']", + "test-node", + &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + Annotations: map[string]string{ + podCIDRsAnnotation: "172.17.0.0/24,2001:db8:42:2::", + }, + }, + }, + []string{"172.17.0.0/24"}, + blankCIDR, + fmt.Errorf("error parsing pod CIDR in node annotation: invalid CIDR address: %s", + "2001:db8:42:2::"), + 
}, } - if _, err = file.Write([]byte(content)); err != nil { - return nil, fmt.Errorf("cannot write to file: %v", err) + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + clientset := fake.NewSimpleClientset() + _, err := clientset.CoreV1().Nodes().Create(context.Background(), testcase.existingNode, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("failed to create existing nodes for test: %v", err) + } + node, err := GetNodeObject(clientset, testcase.hostnameOverride) + if err != nil { + t.Error("unable to get node object from API: " + err.Error()) + } + ipv4CIDRs, ipv6CIDRs, err := GetPodCIDRsFromNodeSpecDualStack(node) + if !reflect.DeepEqual(err, testcase.err) { + t.Logf("actual error: %v", err) + t.Logf("expected error: %v", testcase.err) + t.Error("did not get expected error") + } + + if !reflect.DeepEqual(ipv4CIDRs, testcase.ipv4CIDRs) { + t.Logf("actual IPv4 podCIDR: %q", ipv4CIDRs) + t.Logf("expected IPv4 podCIDR: %q", testcase.ipv4CIDRs) + t.Error("did not get expected IPv4 podCIDR") + } + + if !reflect.DeepEqual(ipv6CIDRs, testcase.ipv6CIDRs) { + t.Logf("actual IPv6 podCIDR: %q", ipv6CIDRs) + t.Logf("expected IPv6 podCIDR: %q", testcase.ipv6CIDRs) + t.Error("did not get expected IPv6 podCIDR") + } + }) } - - fmt.Println("File is ", file.Name()) - return file, nil -} - -func readFile(filename string) (string, error) { - content, err := os.ReadFile(filename) - return string(content), err }
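A minimal sketch of how the new pkg/utils CNI helpers introduced by this patch fit together. The config path, CIDR values, MTU, and import path below are illustrative assumptions and error handling is abbreviated; inside kube-router the real callers are NewNetworkRoutingController and updateCNIConfig above.

package main

import (
	"log"

	"github.com/cloudnativelabs/kube-router/pkg/utils"
)

func main() {
	// Load either a .conf or a .conflist; any legacy single `subnet` entry in the
	// bridge plugin's IPAM section is consolidated into `ranges` while parsing.
	cniNetConf, err := utils.NewCNINetworkConfig("/etc/cni/net.d/10-kuberouter.conflist")
	if err != nil {
		log.Fatalf("failed to parse CNI config: %v", err)
	}

	// Insert both address families; CIDRs already present in the IPAM ranges are a no-op.
	for _, cidr := range []string{"10.242.0.0/24", "2001:db8:42:0::/64"} {
		if err := cniNetConf.InsertPodCIDRIntoIPAM(cidr); err != nil {
			log.Fatalf("failed to insert pod CIDR %s: %v", cidr, err)
		}
	}

	// Update the bridge plugin MTU and write the file back; fields the structs don't
	// model are preserved by the partial marshal/unmarshal helpers.
	cniNetConf.SetMTU(9001)
	if err := cniNetConf.WriteCNIConfig(); err != nil {
		log.Fatalf("failed to write CNI config: %v", err)
	}
}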