diff --git a/internal/app/machined/pkg/controllers/network/nftables_chain_config.go b/internal/app/machined/pkg/controllers/network/nftables_chain_config.go
index 9b9b5c862..7c6961e21 100644
--- a/internal/app/machined/pkg/controllers/network/nftables_chain_config.go
+++ b/internal/app/machined/pkg/controllers/network/nftables_chain_config.go
@@ -22,11 +22,15 @@ import (
 	"github.com/siderolabs/talos/pkg/machinery/constants"
 	"github.com/siderolabs/talos/pkg/machinery/nethelpers"
 	"github.com/siderolabs/talos/pkg/machinery/resources/config"
+	"github.com/siderolabs/talos/pkg/machinery/resources/k8s"
 	"github.com/siderolabs/talos/pkg/machinery/resources/network"
 )
 
-// IngressChainName is the name of the ingress chain.
-const IngressChainName = "ingress"
+// Chain names.
+const (
+	IngressChainName    = "ingress"
+	PreroutingChainName = "prerouting"
+)
 
 // NfTablesChainConfigController generates nftables rules based on machine configuration.
 type NfTablesChainConfigController struct{}
@@ -45,6 +49,11 @@ func (ctrl *NfTablesChainConfigController) Inputs() []controller.Input {
 			ID:        optional.Some(config.V1Alpha1ID),
 			Kind:      controller.InputWeak,
 		},
+		{
+			Namespace: network.NamespaceName,
+			Type:      network.NodeAddressType,
+			Kind:      controller.InputWeak,
+		},
 	}
 }
 
@@ -74,181 +83,32 @@ func (ctrl *NfTablesChainConfigController) Run(ctx context.Context, r controller
 			return fmt.Errorf("error getting machine config: %w", err)
 		}
 
+		// try to get the filtered node addresses first; if they are not available, fall back to the unfiltered resource
+		// (this handles both the case of the node being part of a Kubernetes cluster and the case of it not being part of one)
+		nodeAddresses, err := safe.ReaderGetByID[*network.NodeAddress](ctx, r, network.FilteredNodeAddressID(network.NodeAddressRoutedID, k8s.NodeAddressFilterNoK8s))
+		if err != nil && !state.IsNotFoundError(err) {
+			return fmt.Errorf("error getting filtered node addresses: %w", err)
+		}
+
+		if nodeAddresses == nil {
+			nodeAddresses, err = safe.ReaderGetByID[*network.NodeAddress](ctx, r, network.NodeAddressRoutedID)
+			if err != nil && !state.IsNotFoundError(err) {
+				return fmt.Errorf("error getting node addresses: %w", err)
+			}
+		}
+
 		r.StartTrackingOutputs()
 
 		if cfg != nil && !(cfg.Config().NetworkRules().DefaultAction() == nethelpers.DefaultActionAccept && cfg.Config().NetworkRules().Rules() == nil) {
-			if err = safe.WriterModify(ctx, r, network.NewNfTablesChain(network.NamespaceName, IngressChainName),
-				func(chain *network.NfTablesChain) error {
-					spec := chain.TypedSpec()
-
-					spec.Type = nethelpers.ChainTypeFilter
-					spec.Hook = nethelpers.ChainHookInput
-					spec.Priority = nethelpers.ChainPriorityMangle + 10
-					spec.Policy = nethelpers.VerdictAccept
-
-					// preamble
-					spec.Rules = []network.NfTablesRule{
-						// trusted interfaces: loopback, siderolink and kubespan
-						{
-							MatchIIfName: &network.NfTablesIfNameMatch{
-								InterfaceNames: []string{
-									"lo",
-									constants.SideroLinkName,
-									constants.KubeSpanLinkName,
-								},
-								Operator: nethelpers.OperatorEqual,
-							},
-							AnonCounter: true,
-							Verdict: pointer.To(nethelpers.VerdictAccept),
-						},
-					}
-
-					defaultAction := cfg.Config().NetworkRules().DefaultAction()
-
-					if defaultAction == nethelpers.DefaultActionBlock {
-						spec.Policy = nethelpers.VerdictDrop
-
-						spec.Rules = append(spec.Rules,
-							// conntrack
-							network.NfTablesRule{
-								MatchConntrackState: &network.NfTablesConntrackStateMatch{
-									States: []nethelpers.ConntrackState{
-										nethelpers.ConntrackStateEstablished,
-										nethelpers.ConntrackStateRelated,
-									},
-								},
-								AnonCounter: true,
-								Verdict: pointer.To(nethelpers.VerdictAccept),
-							},
-							network.NfTablesRule{
-								MatchConntrackState: &network.NfTablesConntrackStateMatch{
-									States: []nethelpers.ConntrackState{
-										nethelpers.ConntrackStateInvalid,
-									},
-								},
-								AnonCounter: true,
-								Verdict: pointer.To(nethelpers.VerdictDrop),
-							},
-							// allow ICMP and ICMPv6 explicitly
-							network.NfTablesRule{
-								MatchLayer4: &network.NfTablesLayer4Match{
-									Protocol: nethelpers.ProtocolICMP,
-								},
-								MatchLimit: &network.NfTablesLimitMatch{
-									PacketRatePerSecond: 5,
-								},
-								AnonCounter: true,
-								Verdict: pointer.To(nethelpers.VerdictAccept),
-							},
-							network.NfTablesRule{
-								MatchLayer4: &network.NfTablesLayer4Match{
-									Protocol: nethelpers.ProtocolICMPv6,
-								},
-								MatchLimit: &network.NfTablesLimitMatch{
-									PacketRatePerSecond: 5,
-								},
-								AnonCounter: true,
-								Verdict: pointer.To(nethelpers.VerdictAccept),
-							},
-						)
-
-						if cfg.Config().Machine() != nil && cfg.Config().Cluster() != nil {
-							if cfg.Config().Machine().Features().HostDNS().ForwardKubeDNSToHost() {
-								hostDNSIP := netip.MustParseAddr(constants.HostDNSAddress)
-
-								// allow traffic to host DNS
-								for _, protocol := range []nethelpers.Protocol{nethelpers.ProtocolUDP, nethelpers.ProtocolTCP} {
-									spec.Rules = append(spec.Rules,
-										network.NfTablesRule{
-											MatchSourceAddress: &network.NfTablesAddressMatch{
-												IncludeSubnets: xslices.Map(
-													slices.Concat(
-														cfg.Config().Cluster().Network().PodCIDRs(),
-														cfg.Config().Cluster().Network().ServiceCIDRs(),
-													),
-													netip.MustParsePrefix,
-												),
-											},
-											MatchDestinationAddress: &network.NfTablesAddressMatch{
-												IncludeSubnets: []netip.Prefix{netip.PrefixFrom(hostDNSIP, hostDNSIP.BitLen())},
-											},
-											MatchLayer4: &network.NfTablesLayer4Match{
-												Protocol: protocol,
-												MatchDestinationPort: &network.NfTablesPortMatch{
-													Ranges: []network.PortRange{{Lo: 53, Hi: 53}},
-												},
-											},
-											AnonCounter: true,
-											Verdict: pointer.To(nethelpers.VerdictAccept),
-										},
-									)
-								}
-							}
-						}
-
-						if cfg.Config().Cluster() != nil {
-							spec.Rules = append(spec.Rules,
-								// allow Kubernetes pod/service traffic
-								network.NfTablesRule{
-									MatchSourceAddress: &network.NfTablesAddressMatch{
-										IncludeSubnets: xslices.Map(
-											slices.Concat(cfg.Config().Cluster().Network().PodCIDRs(), cfg.Config().Cluster().Network().ServiceCIDRs()),
-											netip.MustParsePrefix,
-										),
-									},
-									MatchDestinationAddress: &network.NfTablesAddressMatch{
-										IncludeSubnets: xslices.Map(
-											slices.Concat(cfg.Config().Cluster().Network().PodCIDRs(), cfg.Config().Cluster().Network().ServiceCIDRs()),
-											netip.MustParsePrefix,
-										),
-									},
-									AnonCounter: true,
-									Verdict: pointer.To(nethelpers.VerdictAccept),
-								},
-							)
-						}
-					}
-
-					for _, rule := range cfg.Config().NetworkRules().Rules() {
-						portRanges := rule.PortRanges()
-
-						// sort port ranges, machine config validation ensures that there are no overlaps
-						slices.SortFunc(portRanges, func(a, b [2]uint16) int {
-							return cmp.Compare(a[0], b[0])
-						})
-
-						// if default accept, drop anything that doesn't match the rule
-						verdict := nethelpers.VerdictDrop
-
-						if defaultAction == nethelpers.DefaultActionBlock {
-							verdict = nethelpers.VerdictAccept
-						}
-
-						spec.Rules = append(spec.Rules,
-							network.NfTablesRule{
-								MatchSourceAddress: &network.NfTablesAddressMatch{
-									IncludeSubnets: rule.Subnets(),
-									ExcludeSubnets: rule.ExceptSubnets(),
-									Invert: defaultAction == nethelpers.DefaultActionAccept,
-								},
-								MatchLayer4: &network.NfTablesLayer4Match{
-									Protocol: rule.Protocol(),
-									MatchDestinationPort: &network.NfTablesPortMatch{
-										Ranges: xslices.Map(portRanges, func(pr [2]uint16) network.PortRange {
-											return network.PortRange{Lo: pr[0], Hi: pr[1]}
-										}),
-									},
-								},
-								AnonCounter: true,
-								Verdict: pointer.To(verdict),
-							},
-						)
-					}
-
-					return nil
-				}); err != nil {
+			if err = safe.WriterModify(ctx, r, network.NewNfTablesChain(network.NamespaceName, IngressChainName), ctrl.buildIngressChain(cfg)); err != nil {
 				return err
 			}
+
+			if nodeAddresses != nil {
+				if err = safe.WriterModify(ctx, r, network.NewNfTablesChain(network.NamespaceName, PreroutingChainName), ctrl.buildPreroutingChain(cfg, nodeAddresses)); err != nil {
+					return err
+				}
+			}
 		}
 
 		if err = safe.CleanupOutputs[*network.NfTablesChain](ctx, r); err != nil {
@@ -256,3 +116,297 @@ func (ctrl *NfTablesChainConfigController) Run(ctx context.Context, r controller
 		}
 	}
 }
+
+func (ctrl *NfTablesChainConfigController) buildIngressChain(cfg *config.MachineConfig) func(*network.NfTablesChain) error {
+	return func(chain *network.NfTablesChain) error {
+		spec := chain.TypedSpec()
+
+		spec.Type = nethelpers.ChainTypeFilter
+		spec.Hook = nethelpers.ChainHookInput
+		spec.Priority = nethelpers.ChainPriorityMangle + 10
+		spec.Policy = nethelpers.VerdictAccept
+
+		// preamble
+		spec.Rules = []network.NfTablesRule{
+			// trusted interfaces: loopback, siderolink and kubespan
+			{
+				MatchIIfName: &network.NfTablesIfNameMatch{
+					InterfaceNames: []string{
+						"lo",
+						constants.SideroLinkName,
+						constants.KubeSpanLinkName,
+					},
+					Operator: nethelpers.OperatorEqual,
+				},
+				AnonCounter: true,
+				Verdict: pointer.To(nethelpers.VerdictAccept),
+			},
+		}
+
+		defaultAction := cfg.Config().NetworkRules().DefaultAction()
+
+		if defaultAction == nethelpers.DefaultActionBlock {
+			spec.Policy = nethelpers.VerdictDrop
+
+			spec.Rules = append(spec.Rules,
+				// conntrack
+				network.NfTablesRule{
+					MatchConntrackState: &network.NfTablesConntrackStateMatch{
+						States: []nethelpers.ConntrackState{
+							nethelpers.ConntrackStateEstablished,
+							nethelpers.ConntrackStateRelated,
+						},
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(nethelpers.VerdictAccept),
+				},
+				network.NfTablesRule{
+					MatchConntrackState: &network.NfTablesConntrackStateMatch{
+						States: []nethelpers.ConntrackState{
+							nethelpers.ConntrackStateInvalid,
+						},
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(nethelpers.VerdictDrop),
+				},
+				// allow ICMP and ICMPv6 explicitly
+				network.NfTablesRule{
+					MatchLayer4: &network.NfTablesLayer4Match{
+						Protocol: nethelpers.ProtocolICMP,
+					},
+					MatchLimit: &network.NfTablesLimitMatch{
+						PacketRatePerSecond: 5,
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(nethelpers.VerdictAccept),
+				},
+				network.NfTablesRule{
+					MatchLayer4: &network.NfTablesLayer4Match{
+						Protocol: nethelpers.ProtocolICMPv6,
+					},
+					MatchLimit: &network.NfTablesLimitMatch{
+						PacketRatePerSecond: 5,
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(nethelpers.VerdictAccept),
+				},
+			)
+
+			if cfg.Config().Machine() != nil && cfg.Config().Cluster() != nil {
+				if cfg.Config().Machine().Features().HostDNS().ForwardKubeDNSToHost() {
+					hostDNSIP := netip.MustParseAddr(constants.HostDNSAddress)
+
+					// allow traffic to host DNS
+					for _, protocol := range []nethelpers.Protocol{nethelpers.ProtocolUDP, nethelpers.ProtocolTCP} {
+						spec.Rules = append(spec.Rules,
+							network.NfTablesRule{
+								MatchSourceAddress: &network.NfTablesAddressMatch{
+									IncludeSubnets: xslices.Map(
+										slices.Concat(
+											cfg.Config().Cluster().Network().PodCIDRs(),
+											cfg.Config().Cluster().Network().ServiceCIDRs(),
+										),
+										netip.MustParsePrefix,
+									),
+								},
+								MatchDestinationAddress: &network.NfTablesAddressMatch{
+									IncludeSubnets: []netip.Prefix{netip.PrefixFrom(hostDNSIP, hostDNSIP.BitLen())},
+								},
+								MatchLayer4: &network.NfTablesLayer4Match{
+									Protocol: protocol,
+									MatchDestinationPort: &network.NfTablesPortMatch{
+										Ranges: []network.PortRange{{Lo: 53, Hi: 53}},
+									},
+								},
+								AnonCounter: true,
+								Verdict: pointer.To(nethelpers.VerdictAccept),
+							},
+						)
+					}
+				}
+			}
+
+			if cfg.Config().Cluster() != nil {
+				spec.Rules = append(spec.Rules,
+					// allow Kubernetes pod/service traffic
+					network.NfTablesRule{
+						MatchSourceAddress: &network.NfTablesAddressMatch{
+							IncludeSubnets: xslices.Map(
+								slices.Concat(cfg.Config().Cluster().Network().PodCIDRs(), cfg.Config().Cluster().Network().ServiceCIDRs()),
+								netip.MustParsePrefix,
+							),
+						},
+						MatchDestinationAddress: &network.NfTablesAddressMatch{
+							IncludeSubnets: xslices.Map(
+								slices.Concat(cfg.Config().Cluster().Network().PodCIDRs(), cfg.Config().Cluster().Network().ServiceCIDRs()),
+								netip.MustParsePrefix,
+							),
+						},
+						AnonCounter: true,
+						Verdict: pointer.To(nethelpers.VerdictAccept),
+					},
+				)
+			}
+		}
+
+		for _, rule := range cfg.Config().NetworkRules().Rules() {
+			portRanges := rule.PortRanges()
+
+			// sort port ranges, machine config validation ensures that there are no overlaps
+			slices.SortFunc(portRanges, func(a, b [2]uint16) int {
+				return cmp.Compare(a[0], b[0])
+			})
+
+			// if default accept, drop anything that doesn't match the rule
+			verdict := nethelpers.VerdictDrop
+
+			if defaultAction == nethelpers.DefaultActionBlock {
+				verdict = nethelpers.VerdictAccept
+			}
+
+			spec.Rules = append(spec.Rules,
+				network.NfTablesRule{
+					MatchSourceAddress: &network.NfTablesAddressMatch{
+						IncludeSubnets: rule.Subnets(),
+						ExcludeSubnets: rule.ExceptSubnets(),
+						Invert: defaultAction == nethelpers.DefaultActionAccept,
+					},
+					MatchLayer4: &network.NfTablesLayer4Match{
+						Protocol: rule.Protocol(),
+						MatchDestinationPort: &network.NfTablesPortMatch{
+							Ranges: xslices.Map(portRanges, func(pr [2]uint16) network.PortRange {
+								return network.PortRange{Lo: pr[0], Hi: pr[1]}
+							}),
+						},
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(verdict),
+				},
+			)
+		}
+
+		return nil
+	}
+}
+
+func (ctrl *NfTablesChainConfigController) buildPreroutingChain(cfg *config.MachineConfig, nodeAddresses *network.NodeAddress) func(*network.NfTablesChain) error {
+	// convert CIDRs to /32 (/128) prefixes matching only the address itself
+	myAddresses := xslices.Map(nodeAddresses.TypedSpec().Addresses,
+		func(addr netip.Prefix) netip.Prefix {
+			return netip.PrefixFrom(addr.Addr(), addr.Addr().BitLen())
+		},
+	)
+
+	return func(chain *network.NfTablesChain) error {
+		spec := chain.TypedSpec()
+
+		spec.Type = nethelpers.ChainTypeFilter
+		spec.Hook = nethelpers.ChainHookPrerouting
+		spec.Priority = nethelpers.ChainPriorityNATDest - 10
+		spec.Policy = nethelpers.VerdictAccept
+
+		defaultAction := cfg.Config().NetworkRules().DefaultAction()
+
+		// preamble
+		spec.Rules = []network.NfTablesRule{
+			// trusted interfaces: loopback, siderolink and kubespan
+			{
+				MatchIIfName: &network.NfTablesIfNameMatch{
+					InterfaceNames: []string{
+						"lo",
+						constants.SideroLinkName,
+						constants.KubeSpanLinkName,
+					},
+					Operator: nethelpers.OperatorEqual,
+				},
+				AnonCounter: true,
+				Verdict: pointer.To(nethelpers.VerdictAccept),
+			},
+		}
+
+		// if the traffic is not addressed to the machine, ignore (accept it)
+		spec.Rules = append(spec.Rules,
+			network.NfTablesRule{
+				MatchDestinationAddress: &network.NfTablesAddressMatch{
+					IncludeSubnets: myAddresses,
+					Invert: true,
+				},
+				AnonCounter: true,
+				Verdict: pointer.To(nethelpers.VerdictAccept),
+			},
+		)
+
+		// drop any 'new' connections to ports outside of the allowed ranges
+		for _, rule := range cfg.Config().NetworkRules().Rules() {
+			portRanges := rule.PortRanges()
+
+			// sort port ranges, machine config validation ensures that there are no overlaps
+			slices.SortFunc(portRanges, func(a, b [2]uint16) int {
+				return cmp.Compare(a[0], b[0])
+			})
+
+			verdict := nethelpers.VerdictDrop
+
+			if defaultAction == nethelpers.DefaultActionBlock {
+				verdict = nethelpers.VerdictAccept
+			}
+
+			spec.Rules = append(spec.Rules,
+				network.NfTablesRule{
+					MatchConntrackState: &network.NfTablesConntrackStateMatch{
+						States: []nethelpers.ConntrackState{
+							nethelpers.ConntrackStateNew,
+						},
+					},
+					MatchSourceAddress: &network.NfTablesAddressMatch{
+						IncludeSubnets: rule.Subnets(),
+						ExcludeSubnets: rule.ExceptSubnets(),
+						Invert: defaultAction == nethelpers.DefaultActionAccept,
+					},
+					MatchLayer4: &network.NfTablesLayer4Match{
+						Protocol: rule.Protocol(),
+						MatchDestinationPort: &network.NfTablesPortMatch{
+							Ranges: xslices.Map(portRanges, func(pr [2]uint16) network.PortRange {
+								return network.PortRange{Lo: pr[0], Hi: pr[1]}
+							}),
+						},
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(verdict),
+				},
+			)
+		}
+
+		if defaultAction == nethelpers.DefaultActionBlock {
+			// drop any TCP/UDP new connections
+			spec.Rules = append(spec.Rules,
+				network.NfTablesRule{
+					MatchConntrackState: &network.NfTablesConntrackStateMatch{
+						States: []nethelpers.ConntrackState{
+							nethelpers.ConntrackStateNew,
+						},
+					},
+					MatchLayer4: &network.NfTablesLayer4Match{
+						Protocol: nethelpers.ProtocolTCP,
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(nethelpers.VerdictDrop),
+				},
+				network.NfTablesRule{
+					MatchConntrackState: &network.NfTablesConntrackStateMatch{
+						States: []nethelpers.ConntrackState{
+							nethelpers.ConntrackStateNew,
+						},
+					},
+					MatchLayer4: &network.NfTablesLayer4Match{
+						Protocol: nethelpers.ProtocolUDP,
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(nethelpers.VerdictDrop),
+				},
+			)
+		}
+
+		return nil
+	}
+}
diff --git a/internal/app/machined/pkg/controllers/network/nftables_chain_config_test.go b/internal/app/machined/pkg/controllers/network/nftables_chain_config_test.go
index c57a532ee..0c4306a2a 100644
--- a/internal/app/machined/pkg/controllers/network/nftables_chain_config_test.go
+++ b/internal/app/machined/pkg/controllers/network/nftables_chain_config_test.go
@@ -75,7 +75,11 @@ func (suite *NfTablesChainConfigTestSuite) injectConfig(block bool) {
 	cfg, err := container.New(configs...)
 	suite.Require().NoError(err)
 
-	suite.Require().NoError(suite.State().Create(suite.Ctx(), config.NewMachineConfig(cfg)))
+	suite.Create(config.NewMachineConfig(cfg))
+
+	nodeAddresses := network.NewNodeAddress(network.NamespaceName, network.NodeAddressRoutedID)
+	nodeAddresses.TypedSpec().Addresses = []netip.Prefix{netip.MustParsePrefix("10.3.4.5/24")}
+	suite.Create(nodeAddresses)
 }
 
 func (suite *NfTablesChainConfigTestSuite) TestDefaultAccept() {
@@ -154,6 +158,98 @@ func (suite *NfTablesChainConfigTestSuite) TestDefaultAccept() {
 			},
 			spec.Rules)
 	})
+
+	ctest.AssertResource(suite, netctrl.PreroutingChainName, func(chain *network.NfTablesChain, asrt *assert.Assertions) {
+		spec := chain.TypedSpec()
+
+		asrt.Equal(nethelpers.ChainTypeFilter, spec.Type)
+		asrt.Equal(nethelpers.ChainPriorityNATDest-10, spec.Priority)
+		asrt.Equal(nethelpers.ChainHookPrerouting, spec.Hook)
+		asrt.Equal(nethelpers.VerdictAccept, spec.Policy)
+
+		asrt.Equal(
+			[]network.NfTablesRule{
+				{
+					MatchIIfName: &network.NfTablesIfNameMatch{
+						InterfaceNames: []string{
+							"lo",
+							constants.SideroLinkName,
+							constants.KubeSpanLinkName,
+						},
+						Operator: nethelpers.OperatorEqual,
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(nethelpers.VerdictAccept),
+				},
+				{
+					MatchDestinationAddress: &network.NfTablesAddressMatch{
+						IncludeSubnets: []netip.Prefix{
+							netip.MustParsePrefix("10.3.4.5/32"),
+						},
+						Invert: true,
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(nethelpers.VerdictAccept),
+				},
+				{
+					MatchConntrackState: &network.NfTablesConntrackStateMatch{
+						States: []nethelpers.ConntrackState{
+							nethelpers.ConntrackStateNew,
+						},
+					},
+					MatchSourceAddress: &network.NfTablesAddressMatch{
+						IncludeSubnets: []netip.Prefix{
+							netip.MustParsePrefix("10.0.0.0/8"),
+							netip.MustParsePrefix("192.168.0.0/16"),
+						},
+						ExcludeSubnets: []netip.Prefix{
+							netip.MustParsePrefix("10.3.0.0/16"),
+						},
+						Invert: true,
+					},
+					MatchLayer4: &network.NfTablesLayer4Match{
+						Protocol: nethelpers.ProtocolTCP,
+						MatchDestinationPort: &network.NfTablesPortMatch{
+							Ranges: []network.PortRange{
+								{
+									Lo: 10250,
+									Hi: 10250,
+								},
+							},
+						},
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(nethelpers.VerdictDrop),
+				},
+				{
+					MatchConntrackState: &network.NfTablesConntrackStateMatch{
+						States: []nethelpers.ConntrackState{
+							nethelpers.ConntrackStateNew,
+						},
+					},
+					MatchSourceAddress: &network.NfTablesAddressMatch{
+						IncludeSubnets: []netip.Prefix{
+							netip.MustParsePrefix("0.0.0.0/0"),
+						},
+						Invert: true,
+					},
+					MatchLayer4: &network.NfTablesLayer4Match{
+						Protocol: nethelpers.ProtocolTCP,
+						MatchDestinationPort: &network.NfTablesPortMatch{
+							Ranges: []network.PortRange{
+								{
+									Lo: 50000,
+									Hi: 50000,
+								},
+							},
+						},
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(nethelpers.VerdictDrop),
+				},
+			},
+			spec.Rules)
+	})
 }
 
 func (suite *NfTablesChainConfigTestSuite) TestDefaultBlock() {
@@ -269,9 +365,125 @@ func (suite *NfTablesChainConfigTestSuite) TestDefaultBlock() {
 			},
 			spec.Rules)
 	})
+
+	ctest.AssertResource(suite, netctrl.PreroutingChainName, func(chain *network.NfTablesChain, asrt *assert.Assertions) {
+		spec := chain.TypedSpec()
+
+		asrt.Equal(nethelpers.ChainTypeFilter, spec.Type)
+		asrt.Equal(nethelpers.ChainPriorityNATDest-10, spec.Priority)
+		asrt.Equal(nethelpers.ChainHookPrerouting, spec.Hook)
+		asrt.Equal(nethelpers.VerdictAccept, spec.Policy)
+
+		asrt.Equal(
+			[]network.NfTablesRule{
+				{
+					MatchIIfName: &network.NfTablesIfNameMatch{
+						InterfaceNames: []string{
+							"lo",
+							constants.SideroLinkName,
+							constants.KubeSpanLinkName,
+						},
+						Operator: nethelpers.OperatorEqual,
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(nethelpers.VerdictAccept),
+				},
+				{
+					MatchDestinationAddress: &network.NfTablesAddressMatch{
+						IncludeSubnets: []netip.Prefix{
+							netip.MustParsePrefix("10.3.4.5/32"),
+						},
+						Invert: true,
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(nethelpers.VerdictAccept),
+				},
+				{
+					MatchConntrackState: &network.NfTablesConntrackStateMatch{
+						States: []nethelpers.ConntrackState{
+							nethelpers.ConntrackStateNew,
+						},
+					},
+					MatchSourceAddress: &network.NfTablesAddressMatch{
+						IncludeSubnets: []netip.Prefix{
+							netip.MustParsePrefix("10.0.0.0/8"),
+							netip.MustParsePrefix("192.168.0.0/16"),
+						},
+						ExcludeSubnets: []netip.Prefix{
+							netip.MustParsePrefix("10.3.0.0/16"),
+						},
+					},
+					MatchLayer4: &network.NfTablesLayer4Match{
+						Protocol: nethelpers.ProtocolTCP,
+						MatchDestinationPort: &network.NfTablesPortMatch{
+							Ranges: []network.PortRange{
+								{
+									Lo: 10250,
+									Hi: 10250,
+								},
+							},
+						},
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(nethelpers.VerdictAccept),
+				},
+				{
+					MatchConntrackState: &network.NfTablesConntrackStateMatch{
+						States: []nethelpers.ConntrackState{
+							nethelpers.ConntrackStateNew,
+						},
+					},
+					MatchSourceAddress: &network.NfTablesAddressMatch{
+						IncludeSubnets: []netip.Prefix{
+							netip.MustParsePrefix("0.0.0.0/0"),
+						},
+					},
+					MatchLayer4: &network.NfTablesLayer4Match{
+						Protocol: nethelpers.ProtocolTCP,
+						MatchDestinationPort: &network.NfTablesPortMatch{
+							Ranges: []network.PortRange{
+								{
+									Lo: 50000,
+									Hi: 50000,
+								},
+							},
+						},
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(nethelpers.VerdictAccept),
+				},
+				{
+					MatchConntrackState: &network.NfTablesConntrackStateMatch{
+						States: []nethelpers.ConntrackState{
+							nethelpers.ConntrackStateNew,
+						},
+					},
+					MatchLayer4: &network.NfTablesLayer4Match{
+						Protocol: nethelpers.ProtocolTCP,
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(nethelpers.VerdictDrop),
+				},
+				{
+					MatchConntrackState: &network.NfTablesConntrackStateMatch{
+						States: []nethelpers.ConntrackState{
+							nethelpers.ConntrackStateNew,
+						},
+					},
+					MatchLayer4: &network.NfTablesLayer4Match{
+						Protocol: nethelpers.ProtocolUDP,
+					},
+					AnonCounter: true,
+					Verdict: pointer.To(nethelpers.VerdictDrop),
+				},
+			},
+			spec.Rules)
+	})
 }
 
 func TestNfTablesChainConfig(t *testing.T) {
+	t.Parallel()
+
 	suite.Run(t, &NfTablesChainConfigTestSuite{
 		DefaultSuite: ctest.DefaultSuite{
 			Timeout: 5 * time.Second,
diff --git a/internal/integration/api/firewall.go b/internal/integration/api/firewall.go
index fb59252c2..610d5add8 100644
--- a/internal/integration/api/firewall.go
+++ b/internal/integration/api/firewall.go
@@ -9,27 +9,31 @@ package api
 import (
 	"context"
 	"crypto/tls"
+	_ "embed"
 	"errors"
 	"fmt"
 	"net"
 	"net/http"
 	"os"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/cosi-project/runtime/pkg/safe"
 	"github.com/hashicorp/go-cleanhttp"
 	"golang.org/x/sync/errgroup"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/siderolabs/talos/internal/integration/base"
 	"github.com/siderolabs/talos/pkg/machinery/client"
 	"github.com/siderolabs/talos/pkg/machinery/constants"
+	"github.com/siderolabs/talos/pkg/machinery/nethelpers"
 	"github.com/siderolabs/talos/pkg/machinery/resources/network"
 )
 
 // FirewallSuite ...
 type FirewallSuite struct {
-	base.APISuite
+	base.K8sSuite
 
 	ctx       context.Context //nolint:containedctx
 	ctxCancel context.CancelFunc
@@ -113,6 +117,92 @@ func (suite *FirewallSuite) TestKubeletAccess() {
 	suite.Require().NoError(eg.Wait())
 }
 
+//go:embed testdata/nodeport.yaml
+var nodePortServiceYAML []byte
+
+// TestNodePortAccess verifies that without firewall NodePort is available, and not available otherwise.
+//
+//nolint:gocyclo
+func (suite *FirewallSuite) TestNodePortAccess() {
+	allNodes := suite.DiscoverNodeInternalIPs(suite.ctx)
+
+	chain, err := safe.StateGetByID[*network.NfTablesChain](client.WithNode(suite.ctx, allNodes[0]), suite.Client.COSI, "ingress")
+	firewallEnabled := err == nil
+	firewallDefaultBlock := firewallEnabled && chain.TypedSpec().Policy == nethelpers.VerdictDrop
+
+	// our blocking only works with kube-proxy, so we need to make sure it's running
+	out, err := suite.Clientset.CoreV1().Pods("kube-system").List(suite.ctx, metav1.ListOptions{LabelSelector: "k8s-app=kube-proxy"})
+	suite.Require().NoError(err)
+
+	if len(out.Items) == 0 {
+		suite.T().Skip("kube-proxy not running")
+	}
+
+	// create a deployment with a NodePort service
+	localPathStorage := suite.ParseManifests(nodePortServiceYAML)
+
+	suite.T().Cleanup(func() {
+		cleanUpCtx, cleanupCancel := context.WithTimeout(context.Background(), time.Minute)
+		defer cleanupCancel()
+
+		suite.DeleteManifests(cleanUpCtx, localPathStorage)
+	})
+
+	suite.ApplyManifests(suite.ctx, localPathStorage)
+
+	// read back the Service to figure out the assigned NodePort
+	svc, err := suite.Clientset.CoreV1().Services("default").Get(suite.ctx, "test-nginx", metav1.GetOptions{})
+	suite.Require().NoError(err)
+
+	var nodePort int
+
+	for _, portSpec := range svc.Spec.Ports {
+		nodePort = int(portSpec.NodePort)
+	}
+
+	suite.Require().NotZero(nodePort)
+
+	suite.T().Log("sleeping for 5 seconds to allow kube-proxy to update nftables")
+
+	time.Sleep(5 * time.Second)
+
+	eg, ctx := errgroup.WithContext(suite.ctx)
+
+	for _, node := range allNodes {
+		eg.Go(func() error {
+			attemptCtx, cancel := context.WithTimeout(ctx, time.Second)
+			defer cancel()
+
+			var d net.Dialer
+
+			conn, err := d.DialContext(attemptCtx, "tcp", net.JoinHostPort(node, strconv.Itoa(nodePort)))
+			if conn != nil {
+				conn.Close() //nolint:errcheck
+			}
+
+			if firewallDefaultBlock {
+				if err == nil {
+					return errors.New("nodePort API should not be available")
+				}
+
+				if !errors.Is(err, os.ErrDeadlineExceeded) && !errors.Is(err, context.DeadlineExceeded) {
+					return fmt.Errorf("unexpected error: %w", err)
+				}
+			} else if err != nil {
+				// ignore connection refused, as it's not firewall, but rather service proxy not ready yet
+				if !strings.Contains(err.Error(), "connection refused") {
+					return fmt.Errorf("nodePort API should be available: %w", err)
+				}
+			}
+
+			return nil
+		})
+	}
+
+	suite.Require().NoError(eg.Wait())
+}
+
 func init() {
 	allSuites = append(allSuites, new(FirewallSuite))
 }
diff --git a/internal/integration/api/testdata/nodeport.yaml b/internal/integration/api/testdata/nodeport.yaml
new file mode 100644
index 000000000..ed050fd4d
--- /dev/null
+++ b/internal/integration/api/testdata/nodeport.yaml
@@ -0,0 +1,59 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: test-nginx
+  name: test-nginx
+  namespace: default
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 1
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      app: test-nginx
+  strategy:
+    rollingUpdate:
+      maxSurge: 25%
+      maxUnavailable: 25%
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: test-nginx
+    spec:
+      containers:
+        - image: nginx
+          imagePullPolicy: Always
+          name: nginx
+          ports:
+            - containerPort: 80
+              protocol: TCP
+          resources:
+            limits:
+              cpu: 100m
+              memory: 128Mi
+            requests:
+              cpu: 100m
+              memory: 128Mi
+      restartPolicy: Always
+      terminationGracePeriodSeconds: 5
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: test-nginx
+  name: test-nginx
+  namespace: default
+spec:
+  ipFamilies:
+    - IPv4
+  ipFamilyPolicy: SingleStack
+  ports:
+    - port: 80
+      protocol: TCP
+      targetPort: 80
+  selector:
+    app: test-nginx
+  type: NodePort
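
Note (not part of the patch): the NetworkRules() consumed by the controller above come from the Talos ingress firewall machine-config documents. As a minimal sketch of the kind of configuration that would exercise both generated chains (default block, kubelet reachable from the node subnets, apid open to everyone, roughly matching the values asserted in the unit tests), something like the following could be used; the document kinds and field names here follow the documented NetworkDefaultActionConfig/NetworkRuleConfig schema and are illustrative assumptions, not taken from this diff:

# illustrative machine-config documents, not part of this change
apiVersion: v1alpha1
kind: NetworkDefaultActionConfig
ingress: block
---
apiVersion: v1alpha1
kind: NetworkRuleConfig
name: kubelet-ingress
portSelector:
  ports:
    - 10250
  protocol: tcp
ingress:
  - subnet: 10.0.0.0/8
    except: 10.3.0.0/16
  - subnet: 192.168.0.0/16
---
apiVersion: v1alpha1
kind: NetworkRuleConfig
name: apid-ingress
portSelector:
  ports:
    - 50000
  protocol: tcp
ingress:
  - subnet: 0.0.0.0/0

With such a configuration, the ingress chain filters traffic delivered to the host itself, while the new prerouting chain (priority dstnat - 10, i.e. before kube-proxy's DNAT) drops new TCP/UDP connections to the node's own addresses that are not explicitly allowed, which is what the TestNodePortAccess integration test relies on to verify that NodePort services are blocked in default-block mode.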