From ecaad2c6e43afc3457c45d816ae62a2866ac8766 Mon Sep 17 00:00:00 2001 From: Aaron U'Ren Date: Sun, 21 Apr 2024 11:47:00 -0500 Subject: [PATCH] fix(cleanup): add missing handlers for cleanup kube-router v2.X introduced the idea of iptables and ipset handlers that allow kube-router to be dual-stack capable. However, the cleanup logic for the various controllers was not properly ported when this happened. When the cleanup functions run, they often have not had their controllers fully initialized as cleanup should not be dependent on kube-router being able to reach a kube-apiserver. As such, they were missing these handlers. And as such they either silently ended up doing no-ops or worse, they would run into nil pointer failures. This corrects that, so that kube-router no longer fails this way and cleans up as it had in v1.X. --- .../netpol/network_policy_controller.go | 24 ++- pkg/controllers/proxy/hairpin_controller.go | 5 +- .../proxy/network_services_controller.go | 144 +++++++++++------- .../routing/network_routes_controller.go | 100 +++++++----- 4 files changed, 177 insertions(+), 96 deletions(-) diff --git a/pkg/controllers/netpol/network_policy_controller.go b/pkg/controllers/netpol/network_policy_controller.go index 57f44206..92f960f4 100644 --- a/pkg/controllers/netpol/network_policy_controller.go +++ b/pkg/controllers/netpol/network_policy_controller.go @@ -723,6 +723,26 @@ func (npc *NetworkPolicyController) cleanupStaleIPSets(activePolicyIPSets map[st func (npc *NetworkPolicyController) Cleanup() { klog.Info("Cleaning up NetworkPolicyController configurations...") + if len(npc.iptablesCmdHandlers) < 1 { + iptablesCmdHandlers, ipSetHandlers, err := NewIPTablesHandlers(nil) + if err != nil { + klog.Errorf("unable to get iptables and ipset handlers: %v", err) + return + } + npc.iptablesCmdHandlers = iptablesCmdHandlers + npc.ipSetHandlers = ipSetHandlers + + // Make other structures that we rely on + npc.iptablesSaveRestore = 
make(map[v1core.IPFamily]utils.IPTablesSaveRestorer, 2) + npc.iptablesSaveRestore[v1core.IPv4Protocol] = utils.NewIPTablesSaveRestore(v1core.IPv4Protocol) + npc.iptablesSaveRestore[v1core.IPv6Protocol] = utils.NewIPTablesSaveRestore(v1core.IPv6Protocol) + npc.filterTableRules = make(map[v1core.IPFamily]*bytes.Buffer, 2) + var buf bytes.Buffer + npc.filterTableRules[v1core.IPv4Protocol] = &buf + var buf2 bytes.Buffer + npc.filterTableRules[v1core.IPv6Protocol] = &buf2 + } + var emptySet map[string]bool // Take a dump (iptables-save) of the current filter table for cleanupStaleRules() to work on for ipFamily, iptablesSaveRestore := range npc.iptablesSaveRestore { @@ -762,7 +782,7 @@ func NewIPTablesHandlers(config *options.KubeRouterConfig) ( iptablesCmdHandlers := make(map[v1core.IPFamily]utils.IPTablesHandler, 2) ipSetHandlers := make(map[v1core.IPFamily]utils.IPSetHandler, 2) - if config.EnableIPv4 { + if config == nil || config.EnableIPv4 { iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) if err != nil { return nil, nil, fmt.Errorf("failed to create iptables handler: %w", err) @@ -775,7 +795,7 @@ func NewIPTablesHandlers(config *options.KubeRouterConfig) ( } ipSetHandlers[v1core.IPv4Protocol] = ipset } - if config.EnableIPv6 { + if config == nil || config.EnableIPv6 { iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6) if err != nil { return nil, nil, fmt.Errorf("failed to create iptables handler: %w", err) diff --git a/pkg/controllers/proxy/hairpin_controller.go b/pkg/controllers/proxy/hairpin_controller.go index 06bc372e..5801f5c3 100644 --- a/pkg/controllers/proxy/hairpin_controller.go +++ b/pkg/controllers/proxy/hairpin_controller.go @@ -75,15 +75,14 @@ func (hpc *hairpinController) ensureHairpinEnabledForPodInterface(endpointIP str // WARN: This method is deprecated and will be removed once docker-shim is removed from kubelet. 
pid, err = hpc.nsc.ln.getContainerPidWithDocker(containerID) if err != nil { - return fmt.Errorf("failed to prepare endpoint %s to do direct server return due to %v", - endpointIP, err) + return fmt.Errorf("failed to get pod's (%s) pid for hairpinning due to %v", endpointIP, err) } } else { // We expect CRI compliant runtimes here // ugly workaround, refactoring of pkg/Proxy is required pid, err = hpc.nsc.ln.getContainerPidWithCRI(hpc.nsc.dsr.runtimeEndpoint, containerID) if err != nil { - return fmt.Errorf("failed to prepare endpoint %s to do DSR due to: %v", endpointIP, err) + return fmt.Errorf("failed to get pod's (%s) pid for hairpinning due to %v", endpointIP, err) } } klog.V(2).Infof("Found PID %d for endpoint IP %s", pid, endpointIP) diff --git a/pkg/controllers/proxy/network_services_controller.go b/pkg/controllers/proxy/network_services_controller.go index 039c92d0..002b4acc 100644 --- a/pkg/controllers/proxy/network_services_controller.go +++ b/pkg/controllers/proxy/network_services_controller.go @@ -1782,6 +1782,16 @@ func (nsc *NetworkServicesController) Cleanup() { handle.Close() } + // In prep for further steps make sure that ipset and iptables handlers are created + if len(nsc.iptablesCmdHandlers) < 1 { + // Even though we have a config at this point (via passed param), we want to send nil so that the node will + // discover which IP address families it has and act accordingly + err = nsc.setupHandlers(nil, nil) + if err != nil { + klog.Errorf("could not cleanup because we couldn't create iptables/ipset command handlers due to: %v", err) + } + } + // cleanup iptables masquerade rule err = nsc.deleteMasqueradeIptablesRule() if err != nil { @@ -1790,15 +1800,21 @@ func (nsc *NetworkServicesController) Cleanup() { } // cleanup iptables hairpin rules - err = nsc.deleteHairpinIptablesRules(v1.IPv4Protocol) - if err != nil { - klog.Errorf("Failed to cleanup iptables hairpin rules: %s", err.Error()) - return + if _, ok := 
nsc.iptablesCmdHandlers[v1.IPv4Protocol]; ok { + klog.Info("Processing IPv4 hairpin rule cleanup") + err = nsc.deleteHairpinIptablesRules(v1.IPv4Protocol) + if err != nil { + klog.Errorf("Failed to cleanup iptables hairpin rules: %s", err.Error()) + return + } } - err = nsc.deleteHairpinIptablesRules(v1.IPv6Protocol) - if err != nil { - klog.Errorf("Failed to cleanup iptables hairpin rules: %s", err.Error()) - return + if _, ok := nsc.iptablesCmdHandlers[v1.IPv6Protocol]; ok { + klog.Info("Processing IPv6 hairpin rule cleanup") + err = nsc.deleteHairpinIptablesRules(v1.IPv6Protocol) + if err != nil { + klog.Errorf("Failed to cleanup iptables hairpin rules: %s", err.Error()) + return + } } nsc.cleanupIpvsFirewall() @@ -1927,6 +1943,70 @@ func (nsc *NetworkServicesController) handleServiceDelete(obj interface{}) { nsc.OnServiceUpdate(service) } +// setupHandlers Here we test to see whether the node is IPv6 capable, if the user has enabled IPv6 (via command-line +// options) and the node has an IPv6 address, the following method will return an IPv6 address +func (nsc *NetworkServicesController) setupHandlers(config *options.KubeRouterConfig, node *v1.Node) error { + // node being nil covers the case where this function is called by something that doesn't have a kube-apiserver + // connection like the cleanup code. In this instance we want all possible iptables and ipset handlers + if node != nil { + nsc.nodeIPv4Addrs, nsc.nodeIPv6Addrs = utils.GetAllNodeIPs(node) + } + + // We test for nil configs as the Cleanup() method often doesn't have a valid config in this respect, so rather + // than trying to guess options, it is better to just let the logic fallthrough. For the primary path to this func, + // NewNetworkServicesController, the config will not be nil and we want to check that we have options that match + // the node's capability to ensure sanity later down the road. 
+ if config != nil { + if config.EnableIPv4 && len(nsc.nodeIPv4Addrs[v1.NodeInternalIP]) < 1 && + len(nsc.nodeIPv4Addrs[v1.NodeExternalIP]) < 1 { + return fmt.Errorf("IPv4 was enabled, but no IPv4 address was found on the node") + } + } + nsc.isIPv4Capable = len(nsc.nodeIPv4Addrs) > 0 + if config != nil { + if config.EnableIPv6 && len(nsc.nodeIPv6Addrs[v1.NodeInternalIP]) < 1 && + len(nsc.nodeIPv6Addrs[v1.NodeExternalIP]) < 1 { + return fmt.Errorf("IPv6 was enabled, but no IPv6 address was found on the node") + } + } + nsc.isIPv6Capable = len(nsc.nodeIPv6Addrs) > 0 + + nsc.ipSetHandlers = make(map[v1.IPFamily]utils.IPSetHandler) + nsc.iptablesCmdHandlers = make(map[v1.IPFamily]utils.IPTablesHandler) + if node == nil || len(nsc.nodeIPv4Addrs) > 0 { + iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) + if err != nil { + klog.Fatalf("Failed to allocate IPv4 iptables handler: %v", err) + return fmt.Errorf("failed to create iptables handler: %w", err) + } + nsc.iptablesCmdHandlers[v1.IPv4Protocol] = iptHandler + + ipset, err := utils.NewIPSet(false) + if err != nil { + klog.Fatalf("Failed to allocate IPv4 ipset handler: %v", err) + return fmt.Errorf("failed to create ipset handler: %w", err) + } + nsc.ipSetHandlers[v1.IPv4Protocol] = ipset + } + if node == nil || len(nsc.nodeIPv6Addrs) > 0 { + iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6) + if err != nil { + klog.Fatalf("Failed to allocate IPv6 iptables handler: %v", err) + return fmt.Errorf("failed to create iptables handler: %w", err) + } + nsc.iptablesCmdHandlers[v1.IPv6Protocol] = iptHandler + + ipset, err := utils.NewIPSet(true) + if err != nil { + klog.Fatalf("Failed to allocate IPv6 ipset handler: %v", err) + return fmt.Errorf("failed to create ipset handler: %w", err) + } + nsc.ipSetHandlers[v1.IPv6Protocol] = ipset + } + + return nil +} + // NewNetworkServicesController returns NetworkServicesController object func NewNetworkServicesController(clientset 
kubernetes.Interface, config *options.KubeRouterConfig, svcInformer cache.SharedIndexInformer, @@ -2021,51 +2101,9 @@ func NewNetworkServicesController(clientset kubernetes.Interface, return nil, err } - // Here we test to see whether the node is IPv6 capable, if the user has enabled IPv6 (via command-line options) - // and the node has an IPv6 address, the following method will return an IPv6 address - nsc.nodeIPv4Addrs, nsc.nodeIPv6Addrs = utils.GetAllNodeIPs(node) - if config.EnableIPv4 && len(nsc.nodeIPv4Addrs[v1.NodeInternalIP]) < 1 && - len(nsc.nodeIPv4Addrs[v1.NodeExternalIP]) < 1 { - return nil, fmt.Errorf("IPv4 was enabled, but no IPv4 address was found on the node") - } - nsc.isIPv4Capable = len(nsc.nodeIPv4Addrs) > 0 - if config.EnableIPv6 && len(nsc.nodeIPv6Addrs[v1.NodeInternalIP]) < 1 && - len(nsc.nodeIPv6Addrs[v1.NodeExternalIP]) < 1 { - return nil, fmt.Errorf("IPv6 was enabled, but no IPv6 address was found on the node") - } - nsc.isIPv6Capable = len(nsc.nodeIPv6Addrs) > 0 - - nsc.ipSetHandlers = make(map[v1.IPFamily]utils.IPSetHandler) - nsc.iptablesCmdHandlers = make(map[v1.IPFamily]utils.IPTablesHandler) - if len(nsc.nodeIPv4Addrs) > 0 { - iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) - if err != nil { - klog.Fatalf("Failed to allocate IPv4 iptables handler: %v", err) - return nil, fmt.Errorf("failed to create iptables handler: %w", err) - } - nsc.iptablesCmdHandlers[v1.IPv4Protocol] = iptHandler - - ipset, err := utils.NewIPSet(false) - if err != nil { - klog.Fatalf("Failed to allocate IPv4 ipset handler: %v", err) - return nil, fmt.Errorf("failed to create ipset handler: %w", err) - } - nsc.ipSetHandlers[v1.IPv4Protocol] = ipset - } - if len(nsc.nodeIPv6Addrs) > 0 { - iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6) - if err != nil { - klog.Fatalf("Failed to allocate IPv6 iptables handler: %v", err) - return nil, fmt.Errorf("failed to create iptables handler: %w", err) - } - 
nsc.iptablesCmdHandlers[v1.IPv6Protocol] = iptHandler - - ipset, err := utils.NewIPSet(true) - if err != nil { - klog.Fatalf("Failed to allocate IPv6 ipset handler: %v", err) - return nil, fmt.Errorf("failed to create ipset handler: %w", err) - } - nsc.ipSetHandlers[v1.IPv6Protocol] = ipset + err = nsc.setupHandlers(config, node) + if err != nil { + return nil, err } automtu, err := utils.GetMTUFromNodeIP(nsc.primaryIP) diff --git a/pkg/controllers/routing/network_routes_controller.go b/pkg/controllers/routing/network_routes_controller.go index b347347a..8407cdae 100644 --- a/pkg/controllers/routing/network_routes_controller.go +++ b/pkg/controllers/routing/network_routes_controller.go @@ -896,6 +896,17 @@ func (nrc *NetworkRoutingController) setupOverlayTunnel(tunnelName string, nextH func (nrc *NetworkRoutingController) Cleanup() { klog.Infof("Cleaning up NetworkRoutesController configurations") + // In prep for further steps make sure that ipset and iptables handlers are created + if len(nrc.iptablesCmdHandlers) < 1 { + // Even though we have a config at this point (via passed param), we want to send nil so that the node will + // discover which IP address families it has and act accordingly + err := nrc.setupHandlers(nil) + if err != nil { + klog.Errorf("could not cleanup because iptables/ipset handlers could not be created due to: %v", err) + return + } + } + // Pod egress cleanup err := nrc.deletePodEgressRule() if err != nil { @@ -1359,7 +1370,55 @@ func (nrc *NetworkRoutingController) startBgpServer(grpcServer bool) error { return nil } -// func (nrc *NetworkRoutingController) getExternalNodeIPs( +func (nrc *NetworkRoutingController) setupHandlers(node *v1core.Node) error { + var err error + + // node being nil covers the case where this function is called by something that doesn't have a kube-apiserver + // connection like the cleanup code. 
In this instance we want all possible iptables and ipset handlers + if node != nil { + nrc.podIPv4CIDRs, nrc.podIPv6CIDRs, err = utils.GetPodCIDRsFromNodeSpecDualStack(node) + if err != nil { + klog.Fatalf("Failed to get pod CIDRs from node spec. kube-router relies on kube-controller-manager to"+ + "allocate pod CIDRs for the node or an annotation `kube-router.io/pod-cidrs`. Error: %v", err) + return fmt.Errorf("failed to get pod CIDRs detail from Node.spec: %v", err) + } + } + + nrc.iptablesCmdHandlers = make(map[v1core.IPFamily]utils.IPTablesHandler) + nrc.ipSetHandlers = make(map[v1core.IPFamily]utils.IPSetHandler) + if node == nil || len(nrc.nodeIPv4Addrs) > 0 { + iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) + if err != nil { + klog.Fatalf("Failed to allocate IPv4 iptables handler: %v", err) + return fmt.Errorf("failed to create iptables handler: %w", err) + } + nrc.iptablesCmdHandlers[v1core.IPv4Protocol] = iptHandler + + ipset, err := utils.NewIPSet(false) + if err != nil { + klog.Fatalf("Failed to allocate IPv4 ipset handler: %v", err) + return fmt.Errorf("failed to create ipset handler: %w", err) + } + nrc.ipSetHandlers[v1core.IPv4Protocol] = ipset + } + if node == nil || len(nrc.nodeIPv6Addrs) > 0 { + iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6) + if err != nil { + klog.Fatalf("Failed to allocate IPv6 iptables handler: %v", err) + return fmt.Errorf("failed to create iptables handler: %w", err) + } + nrc.iptablesCmdHandlers[v1core.IPv6Protocol] = iptHandler + + ipset, err := utils.NewIPSet(true) + if err != nil { + klog.Fatalf("Failed to allocate IPv6 ipset handler: %v", err) + return fmt.Errorf("failed to create ipset handler: %w", err) + } + nrc.ipSetHandlers[v1core.IPv6Protocol] = ipset + } + + return nil +} // NewNetworkRoutingController returns new NetworkRoutingController object func NewNetworkRoutingController(clientset kubernetes.Interface, @@ -1473,44 +1532,9 @@ func NewNetworkRoutingController(clientset 
kubernetes.Interface, } nrc.podCidr = cidr - nrc.podIPv4CIDRs, nrc.podIPv6CIDRs, err = utils.GetPodCIDRsFromNodeSpecDualStack(node) + err = nrc.setupHandlers(node) if err != nil { - klog.Fatalf("Failed to get pod CIDRs from node spec. kube-router relies on kube-controller-manager to"+ - "allocate pod CIDRs for the node or an annotation `kube-router.io/pod-cidrs`. Error: %v", err) - return nil, fmt.Errorf("failed to get pod CIDRs detail from Node.spec: %v", err) - } - - nrc.iptablesCmdHandlers = make(map[v1core.IPFamily]utils.IPTablesHandler) - nrc.ipSetHandlers = make(map[v1core.IPFamily]utils.IPSetHandler) - if len(nrc.nodeIPv4Addrs) > 0 { - iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) - if err != nil { - klog.Fatalf("Failed to allocate IPv4 iptables handler: %v", err) - return nil, fmt.Errorf("failed to create iptables handler: %w", err) - } - nrc.iptablesCmdHandlers[v1core.IPv4Protocol] = iptHandler - - ipset, err := utils.NewIPSet(false) - if err != nil { - klog.Fatalf("Failed to allocate IPv4 ipset handler: %v", err) - return nil, fmt.Errorf("failed to create ipset handler: %w", err) - } - nrc.ipSetHandlers[v1core.IPv4Protocol] = ipset - } - if len(nrc.nodeIPv6Addrs) > 0 { - iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6) - if err != nil { - klog.Fatalf("Failed to allocate IPv6 iptables handler: %v", err) - return nil, fmt.Errorf("failed to create iptables handler: %w", err) - } - nrc.iptablesCmdHandlers[v1core.IPv6Protocol] = iptHandler - - ipset, err := utils.NewIPSet(true) - if err != nil { - klog.Fatalf("Failed to allocate IPv6 ipset handler: %v", err) - return nil, fmt.Errorf("failed to create ipset handler: %w", err) - } - nrc.ipSetHandlers[v1core.IPv6Protocol] = ipset + return nil, err } for _, handler := range nrc.ipSetHandlers {