fix(cleanup): add missing handlers for cleanup

kube-router v2.X introduced the idea of iptables and ipset handlers that
allow kube-router to be dual-stack capable. However, the cleanup logic
for the various controllers was never properly ported to use them. When
the cleanup functions run, their controllers have often not been fully
initialized, because cleanup must not be dependent on kube-router being
able to reach a kube-apiserver.

As a result, the controllers were missing these handlers, and the
cleanup functions either silently became no-ops or, worse, crashed with
nil pointer failures.
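
To illustrate the two failure modes, here is a minimal standalone Go
sketch (hypothetical handler type and map keys, not kube-router's actual
code): ranging over a nil map iterates zero times, so a cleanup loop
silently does nothing, while calling a method through a nil interface
value taken from that map panics.

    package main

    import "fmt"

    type ipSetHandler interface{ DestroyAllWithin() error }

    func main() {
    	// Never initialized, as on the broken cleanup path.
    	var handlers map[string]ipSetHandler

    	// Ranging over a nil map is legal and iterates zero times,
    	// so a cleanup loop written against it is a silent no-op.
    	for family := range handlers {
    		fmt.Println("cleaning up", family) // never reached
    	}

    	// Indexing a nil map returns the zero value, here a nil
    	// interface; calling a method through it is the nil pointer
    	// failure.
    	h := handlers["IPv4"]
    	_ = h.DestroyAllWithin() // panics: nil pointer dereference
    }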

This commit corrects that, so kube-router no longer fails this way and
cleans up as it did in v1.X.
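
The fix has the same shape in every controller; the sketch below uses
illustrative stand-in types (the real implementations are the
setupHandlers/NewIPTablesHandlers functions in the diffs that follow):
Cleanup() lazily builds handlers for every IP family when it finds none,
instead of relying on a config or kube-apiserver connection it may not
have.

    package main

    import (
    	"errors"
    	"fmt"
    )

    type family string

    type controller struct {
    	// Stand-in for the per-family iptables/ipset handlers.
    	iptablesCmdHandlers map[family]func() error
    }

    // setupHandlers builds a handler for every IP family when no config
    // is available, which is what the apiserver-less cleanup path wants.
    func (c *controller) setupHandlers() error {
    	c.iptablesCmdHandlers = map[family]func() error{
    		"IPv4": func() error { return nil },
    		"IPv6": func() error { return nil },
    	}
    	if len(c.iptablesCmdHandlers) == 0 {
    		return errors.New("no iptables/ipset handlers could be created")
    	}
    	return nil
    }

    // Cleanup guards against a half-constructed controller before
    // touching any handler, mirroring the guard added in this commit.
    func (c *controller) Cleanup() {
    	if len(c.iptablesCmdHandlers) < 1 {
    		if err := c.setupHandlers(); err != nil {
    			fmt.Printf("could not cleanup, no command handlers: %v\n", err)
    			return
    		}
    	}
    	for fam, teardown := range c.iptablesCmdHandlers {
    		_ = teardown() // delete the rules/sets for this family
    		fmt.Printf("cleaned up %s rules\n", fam)
    	}
    }

    func main() {
    	// A controller that was never fully initialized, as during cleanup.
    	(&controller{}).Cleanup()
    }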
Author: Aaron U'Ren, 2024-04-21 11:47:00 -05:00 (committed by Aaron U'Ren)
parent 7755b4a67f
commit ecaad2c6e4
4 changed files with 177 additions and 96 deletions


@@ -723,6 +723,26 @@ func (npc *NetworkPolicyController) cleanupStaleIPSets(activePolicyIPSets map[st
 func (npc *NetworkPolicyController) Cleanup() {
 	klog.Info("Cleaning up NetworkPolicyController configurations...")
 
+	if len(npc.iptablesCmdHandlers) < 1 {
+		iptablesCmdHandlers, ipSetHandlers, err := NewIPTablesHandlers(nil)
+		if err != nil {
+			klog.Errorf("unable to get iptables and ipset handlers: %v", err)
+			return
+		}
+		npc.iptablesCmdHandlers = iptablesCmdHandlers
+		npc.ipSetHandlers = ipSetHandlers
+
+		// Make other structures that we rely on
+		npc.iptablesSaveRestore = make(map[v1core.IPFamily]utils.IPTablesSaveRestorer, 2)
+		npc.iptablesSaveRestore[v1core.IPv4Protocol] = utils.NewIPTablesSaveRestore(v1core.IPv4Protocol)
+		npc.iptablesSaveRestore[v1core.IPv6Protocol] = utils.NewIPTablesSaveRestore(v1core.IPv6Protocol)
+		npc.filterTableRules = make(map[v1core.IPFamily]*bytes.Buffer, 2)
+		var buf bytes.Buffer
+		npc.filterTableRules[v1core.IPv4Protocol] = &buf
+		var buf2 bytes.Buffer
+		npc.filterTableRules[v1core.IPv6Protocol] = &buf2
+	}
+
 	var emptySet map[string]bool
 	// Take a dump (iptables-save) of the current filter table for cleanupStaleRules() to work on
 	for ipFamily, iptablesSaveRestore := range npc.iptablesSaveRestore {
@@ -762,7 +782,7 @@ func NewIPTablesHandlers(config *options.KubeRouterConfig) (
 	iptablesCmdHandlers := make(map[v1core.IPFamily]utils.IPTablesHandler, 2)
 	ipSetHandlers := make(map[v1core.IPFamily]utils.IPSetHandler, 2)
 
-	if config.EnableIPv4 {
+	if config == nil || config.EnableIPv4 {
 		iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
 		if err != nil {
 			return nil, nil, fmt.Errorf("failed to create iptables handler: %w", err)
@@ -775,7 +795,7 @@ func NewIPTablesHandlers(config *options.KubeRouterConfig) (
 		}
 		ipSetHandlers[v1core.IPv4Protocol] = ipset
 	}
-	if config.EnableIPv6 {
+	if config == nil || config.EnableIPv6 {
 		iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
 		if err != nil {
 			return nil, nil, fmt.Errorf("failed to create iptables handler: %w", err)


@@ -75,15 +75,14 @@ func (hpc *hairpinController) ensureHairpinEnabledForPodInterface(endpointIP str
 		// WARN: This method is deprecated and will be removed once docker-shim is removed from kubelet.
 		pid, err = hpc.nsc.ln.getContainerPidWithDocker(containerID)
 		if err != nil {
-			return fmt.Errorf("failed to prepare endpoint %s to do direct server return due to %v",
-				endpointIP, err)
+			return fmt.Errorf("failed to get pod's (%s) pid for hairpinning due to %v", endpointIP, err)
 		}
 	} else {
 		// We expect CRI compliant runtimes here
 		// ugly workaround, refactoring of pkg/Proxy is required
 		pid, err = hpc.nsc.ln.getContainerPidWithCRI(hpc.nsc.dsr.runtimeEndpoint, containerID)
 		if err != nil {
-			return fmt.Errorf("failed to prepare endpoint %s to do DSR due to: %v", endpointIP, err)
+			return fmt.Errorf("failed to get pod's (%s) pid for hairpinning due to %v", endpointIP, err)
 		}
 	}
 	klog.V(2).Infof("Found PID %d for endpoint IP %s", pid, endpointIP)


@@ -1782,6 +1782,16 @@ func (nsc *NetworkServicesController) Cleanup() {
 		handle.Close()
 	}
 
+	// In prep for further steps make sure that ipset and iptables handlers are created
+	if len(nsc.iptablesCmdHandlers) < 1 {
+		// Even though we have a config at this point (via passed param), we want to send nil so that the node will
+		// discover which IP address families it has and act accordingly
+		err = nsc.setupHandlers(nil, nil)
+		if err != nil {
+			klog.Errorf("could not cleanup because we couldn't create iptables/ipset command handlers due to: %v", err)
+		}
+	}
+
 	// cleanup iptables masquerade rule
 	err = nsc.deleteMasqueradeIptablesRule()
 	if err != nil {
@@ -1790,16 +1800,22 @@ func (nsc *NetworkServicesController) Cleanup() {
 	}
 
 	// cleanup iptables hairpin rules
+	if _, ok := nsc.iptablesCmdHandlers[v1.IPv4Protocol]; ok {
+		klog.Info("Processing IPv4 hairpin rule cleanup")
 		err = nsc.deleteHairpinIptablesRules(v1.IPv4Protocol)
 		if err != nil {
 			klog.Errorf("Failed to cleanup iptables hairpin rules: %s", err.Error())
 			return
 		}
+	}
 
+	if _, ok := nsc.iptablesCmdHandlers[v1.IPv6Protocol]; ok {
+		klog.Info("Processing IPv6 hairpin rule cleanup")
 		err = nsc.deleteHairpinIptablesRules(v1.IPv6Protocol)
 		if err != nil {
 			klog.Errorf("Failed to cleanup iptables hairpin rules: %s", err.Error())
 			return
 		}
+	}
 
 	nsc.cleanupIpvsFirewall()
@@ -1927,6 +1943,70 @@ func (nsc *NetworkServicesController) handleServiceDelete(obj interface{}) {
 	nsc.OnServiceUpdate(service)
 }
 
+// setupHandlers Here we test to see whether the node is IPv6 capable, if the user has enabled IPv6 (via command-line
+// options) and the node has an IPv6 address, the following method will return an IPv6 address
+func (nsc *NetworkServicesController) setupHandlers(config *options.KubeRouterConfig, node *v1.Node) error {
+	// node being nil covers the case where this function is called by something that doesn't have a kube-apiserver
+	// connection like the cleanup code. In this instance we want all possible iptables and ipset handlers
+	if node != nil {
+		nsc.nodeIPv4Addrs, nsc.nodeIPv6Addrs = utils.GetAllNodeIPs(node)
+	}
+
+	// We test for nil configs as the Cleanup() method often doesn't have a valid config in this respect, so rather
+	// than trying to guess options, it is better to just let the logic fallthrough. For the primary path to this func,
+	// NewNetworkServicesController, the config will not be nil and we want to check that we have options that match
+	// the node's capability to ensure sanity later down the road.
+	if config != nil {
+		if config.EnableIPv4 && len(nsc.nodeIPv4Addrs[v1.NodeInternalIP]) < 1 &&
+			len(nsc.nodeIPv4Addrs[v1.NodeExternalIP]) < 1 {
+			return fmt.Errorf("IPv4 was enabled, but no IPv4 address was found on the node")
+		}
+	}
+	nsc.isIPv4Capable = len(nsc.nodeIPv4Addrs) > 0
+	if config != nil {
+		if config.EnableIPv6 && len(nsc.nodeIPv6Addrs[v1.NodeInternalIP]) < 1 &&
+			len(nsc.nodeIPv6Addrs[v1.NodeExternalIP]) < 1 {
+			return fmt.Errorf("IPv6 was enabled, but no IPv6 address was found on the node")
+		}
+	}
+	nsc.isIPv6Capable = len(nsc.nodeIPv6Addrs) > 0
+
+	nsc.ipSetHandlers = make(map[v1.IPFamily]utils.IPSetHandler)
+	nsc.iptablesCmdHandlers = make(map[v1.IPFamily]utils.IPTablesHandler)
+
+	if node == nil || len(nsc.nodeIPv4Addrs) > 0 {
+		iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
+		if err != nil {
+			klog.Fatalf("Failed to allocate IPv4 iptables handler: %v", err)
+			return fmt.Errorf("failed to create iptables handler: %w", err)
+		}
+		nsc.iptablesCmdHandlers[v1.IPv4Protocol] = iptHandler
+
+		ipset, err := utils.NewIPSet(false)
+		if err != nil {
+			klog.Fatalf("Failed to allocate IPv4 ipset handler: %v", err)
+			return fmt.Errorf("failed to create ipset handler: %w", err)
+		}
+		nsc.ipSetHandlers[v1.IPv4Protocol] = ipset
+	}
+
+	if node == nil || len(nsc.nodeIPv6Addrs) > 0 {
+		iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
+		if err != nil {
+			klog.Fatalf("Failed to allocate IPv6 iptables handler: %v", err)
+			return fmt.Errorf("failed to create iptables handler: %w", err)
+		}
+		nsc.iptablesCmdHandlers[v1.IPv6Protocol] = iptHandler
+
+		ipset, err := utils.NewIPSet(true)
+		if err != nil {
+			klog.Fatalf("Failed to allocate IPv6 ipset handler: %v", err)
+			return fmt.Errorf("failed to create ipset handler: %w", err)
+		}
+		nsc.ipSetHandlers[v1.IPv6Protocol] = ipset
+	}
+
+	return nil
+}
+
 // NewNetworkServicesController returns NetworkServicesController object
 func NewNetworkServicesController(clientset kubernetes.Interface,
 	config *options.KubeRouterConfig, svcInformer cache.SharedIndexInformer,
@@ -2021,51 +2101,9 @@ func NewNetworkServicesController(clientset kubernetes.Interface,
 		return nil, err
 	}
 
-	// Here we test to see whether the node is IPv6 capable, if the user has enabled IPv6 (via command-line options)
-	// and the node has an IPv6 address, the following method will return an IPv6 address
-	nsc.nodeIPv4Addrs, nsc.nodeIPv6Addrs = utils.GetAllNodeIPs(node)
-
-	if config.EnableIPv4 && len(nsc.nodeIPv4Addrs[v1.NodeInternalIP]) < 1 &&
-		len(nsc.nodeIPv4Addrs[v1.NodeExternalIP]) < 1 {
-		return nil, fmt.Errorf("IPv4 was enabled, but no IPv4 address was found on the node")
-	}
-	nsc.isIPv4Capable = len(nsc.nodeIPv4Addrs) > 0
-
-	if config.EnableIPv6 && len(nsc.nodeIPv6Addrs[v1.NodeInternalIP]) < 1 &&
-		len(nsc.nodeIPv6Addrs[v1.NodeExternalIP]) < 1 {
-		return nil, fmt.Errorf("IPv6 was enabled, but no IPv6 address was found on the node")
-	}
-	nsc.isIPv6Capable = len(nsc.nodeIPv6Addrs) > 0
-
-	nsc.ipSetHandlers = make(map[v1.IPFamily]utils.IPSetHandler)
-	nsc.iptablesCmdHandlers = make(map[v1.IPFamily]utils.IPTablesHandler)
-
-	if len(nsc.nodeIPv4Addrs) > 0 {
-		iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
-		if err != nil {
-			klog.Fatalf("Failed to allocate IPv4 iptables handler: %v", err)
-			return nil, fmt.Errorf("failed to create iptables handler: %w", err)
-		}
-		nsc.iptablesCmdHandlers[v1.IPv4Protocol] = iptHandler
-
-		ipset, err := utils.NewIPSet(false)
-		if err != nil {
-			klog.Fatalf("Failed to allocate IPv4 ipset handler: %v", err)
-			return nil, fmt.Errorf("failed to create ipset handler: %w", err)
-		}
-		nsc.ipSetHandlers[v1.IPv4Protocol] = ipset
-	}
-
-	if len(nsc.nodeIPv6Addrs) > 0 {
-		iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
-		if err != nil {
-			klog.Fatalf("Failed to allocate IPv6 iptables handler: %v", err)
-			return nil, fmt.Errorf("failed to create iptables handler: %w", err)
-		}
-		nsc.iptablesCmdHandlers[v1.IPv6Protocol] = iptHandler
-
-		ipset, err := utils.NewIPSet(true)
-		if err != nil {
-			klog.Fatalf("Failed to allocate IPv6 ipset handler: %v", err)
-			return nil, fmt.Errorf("failed to create ipset handler: %w", err)
-		}
-		nsc.ipSetHandlers[v1.IPv6Protocol] = ipset
-	}
+	err = nsc.setupHandlers(config, node)
+	if err != nil {
+		return nil, err
+	}
 
 	automtu, err := utils.GetMTUFromNodeIP(nsc.primaryIP)


@@ -896,6 +896,17 @@ func (nrc *NetworkRoutingController) setupOverlayTunnel(tunnelName string, nextH
 func (nrc *NetworkRoutingController) Cleanup() {
 	klog.Infof("Cleaning up NetworkRoutesController configurations")
 
+	// In prep for further steps make sure that ipset and iptables handlers are created
+	if len(nrc.iptablesCmdHandlers) < 1 {
+		// Even though we have a config at this point (via passed param), we want to send nil so that the node will
+		// discover which IP address families it has and act accordingly
+		err := nrc.setupHandlers(nil)
+		if err != nil {
+			klog.Errorf("could not cleanup because iptables/ipset handlers could not be created due to: %v", err)
+			return
+		}
+	}
+
 	// Pod egress cleanup
 	err := nrc.deletePodEgressRule()
 	if err != nil {
@@ -1359,7 +1370,55 @@ func (nrc *NetworkRoutingController) startBgpServer(grpcServer bool) error {
 	return nil
 }
 
-// func (nrc *NetworkRoutingController) getExternalNodeIPs(
+func (nrc *NetworkRoutingController) setupHandlers(node *v1core.Node) error {
+	var err error
+
+	// node being nil covers the case where this function is called by something that doesn't have a kube-apiserver
+	// connection like the cleanup code. In this instance we want all possible iptables and ipset handlers
+	if node != nil {
+		nrc.podIPv4CIDRs, nrc.podIPv6CIDRs, err = utils.GetPodCIDRsFromNodeSpecDualStack(node)
+		if err != nil {
+			klog.Fatalf("Failed to get pod CIDRs from node spec. kube-router relies on kube-controller-manager to"+
+				"allocate pod CIDRs for the node or an annotation `kube-router.io/pod-cidrs`. Error: %v", err)
+			return fmt.Errorf("failed to get pod CIDRs detail from Node.spec: %v", err)
+		}
+	}
+
+	nrc.iptablesCmdHandlers = make(map[v1core.IPFamily]utils.IPTablesHandler)
+	nrc.ipSetHandlers = make(map[v1core.IPFamily]utils.IPSetHandler)
+
+	if node == nil || len(nrc.nodeIPv4Addrs) > 0 {
+		iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
+		if err != nil {
+			klog.Fatalf("Failed to allocate IPv4 iptables handler: %v", err)
+			return fmt.Errorf("failed to create iptables handler: %w", err)
+		}
+		nrc.iptablesCmdHandlers[v1core.IPv4Protocol] = iptHandler
+
+		ipset, err := utils.NewIPSet(false)
+		if err != nil {
+			klog.Fatalf("Failed to allocate IPv4 ipset handler: %v", err)
+			return fmt.Errorf("failed to create ipset handler: %w", err)
+		}
+		nrc.ipSetHandlers[v1core.IPv4Protocol] = ipset
+	}
+
+	if node == nil || len(nrc.nodeIPv6Addrs) > 0 {
+		iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
+		if err != nil {
+			klog.Fatalf("Failed to allocate IPv6 iptables handler: %v", err)
+			return fmt.Errorf("failed to create iptables handler: %w", err)
+		}
+		nrc.iptablesCmdHandlers[v1core.IPv6Protocol] = iptHandler
+
+		ipset, err := utils.NewIPSet(true)
+		if err != nil {
+			klog.Fatalf("Failed to allocate IPv6 ipset handler: %v", err)
+			return fmt.Errorf("failed to create ipset handler: %w", err)
+		}
+		nrc.ipSetHandlers[v1core.IPv6Protocol] = ipset
+	}
+
+	return nil
+}
 
 // NewNetworkRoutingController returns new NetworkRoutingController object
 func NewNetworkRoutingController(clientset kubernetes.Interface,
@@ -1473,44 +1532,9 @@ func NewNetworkRoutingController(clientset kubernetes.Interface,
 	}
 	nrc.podCidr = cidr
 
-	nrc.podIPv4CIDRs, nrc.podIPv6CIDRs, err = utils.GetPodCIDRsFromNodeSpecDualStack(node)
-	if err != nil {
-		klog.Fatalf("Failed to get pod CIDRs from node spec. kube-router relies on kube-controller-manager to"+
-			"allocate pod CIDRs for the node or an annotation `kube-router.io/pod-cidrs`. Error: %v", err)
-		return nil, fmt.Errorf("failed to get pod CIDRs detail from Node.spec: %v", err)
-	}
-
-	nrc.iptablesCmdHandlers = make(map[v1core.IPFamily]utils.IPTablesHandler)
-	nrc.ipSetHandlers = make(map[v1core.IPFamily]utils.IPSetHandler)
-
-	if len(nrc.nodeIPv4Addrs) > 0 {
-		iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
-		if err != nil {
-			klog.Fatalf("Failed to allocate IPv4 iptables handler: %v", err)
-			return nil, fmt.Errorf("failed to create iptables handler: %w", err)
-		}
-		nrc.iptablesCmdHandlers[v1core.IPv4Protocol] = iptHandler
-
-		ipset, err := utils.NewIPSet(false)
-		if err != nil {
-			klog.Fatalf("Failed to allocate IPv4 ipset handler: %v", err)
-			return nil, fmt.Errorf("failed to create ipset handler: %w", err)
-		}
-		nrc.ipSetHandlers[v1core.IPv4Protocol] = ipset
-	}
-
-	if len(nrc.nodeIPv6Addrs) > 0 {
-		iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
-		if err != nil {
-			klog.Fatalf("Failed to allocate IPv6 iptables handler: %v", err)
-			return nil, fmt.Errorf("failed to create iptables handler: %w", err)
-		}
-		nrc.iptablesCmdHandlers[v1core.IPv6Protocol] = iptHandler
-
-		ipset, err := utils.NewIPSet(true)
-		if err != nil {
-			klog.Fatalf("Failed to allocate IPv6 ipset handler: %v", err)
-			return nil, fmt.Errorf("failed to create ipset handler: %w", err)
-		}
-		nrc.ipSetHandlers[v1core.IPv6Protocol] = ipset
-	}
+	err = nrc.setupHandlers(node)
+	if err != nil {
+		return nil, err
+	}
 
 	for _, handler := range nrc.ipSetHandlers {