fix: block NodePort services with ingress firewall

The previous fix (#10354) was not complete.

The problem lies in the fact that `kube-proxy` creates a rule like:

```
chain nat-prerouting {
	type nat hook prerouting priority dstnat; policy accept;
	jump services
}
```

This chain has a prerouting hook, which is executed before Talos's input
hook and performs DNAT for NodePort services before Talos has a chance to
block the packet. The rewritten packet then hits the input chain with the
post-DNAT address, or might be forwarded to another host and never hit the
firewall again.

Signed-off-by: Andrey Smirnov <andrey.smirnov@siderolabs.com>
This commit is contained in:
Andrey Smirnov 2025-02-27 20:20:53 +04:00
parent fd8131cb86
commit a3f88d2ef5
No known key found for this signature in database
GPG Key ID: FE042E3D4085A811
4 changed files with 689 additions and 174 deletions

View File

@ -22,11 +22,15 @@ import (
"github.com/siderolabs/talos/pkg/machinery/constants" "github.com/siderolabs/talos/pkg/machinery/constants"
"github.com/siderolabs/talos/pkg/machinery/nethelpers" "github.com/siderolabs/talos/pkg/machinery/nethelpers"
"github.com/siderolabs/talos/pkg/machinery/resources/config" "github.com/siderolabs/talos/pkg/machinery/resources/config"
"github.com/siderolabs/talos/pkg/machinery/resources/k8s"
"github.com/siderolabs/talos/pkg/machinery/resources/network" "github.com/siderolabs/talos/pkg/machinery/resources/network"
) )
// IngressChainName is the name of the ingress chain. // Chain names.
const IngressChainName = "ingress" const (
IngressChainName = "ingress"
PreroutingChainName = "prerouting"
)
// NfTablesChainConfigController generates nftables rules based on machine configuration. // NfTablesChainConfigController generates nftables rules based on machine configuration.
type NfTablesChainConfigController struct{} type NfTablesChainConfigController struct{}
@ -45,6 +49,11 @@ func (ctrl *NfTablesChainConfigController) Inputs() []controller.Input {
ID: optional.Some(config.V1Alpha1ID), ID: optional.Some(config.V1Alpha1ID),
Kind: controller.InputWeak, Kind: controller.InputWeak,
}, },
{
Namespace: network.NamespaceName,
Type: network.NodeAddressType,
Kind: controller.InputWeak,
},
} }
} }
@ -74,181 +83,32 @@ func (ctrl *NfTablesChainConfigController) Run(ctx context.Context, r controller
return fmt.Errorf("error getting machine config: %w", err) return fmt.Errorf("error getting machine config: %w", err)
} }
// try first to get filtered node addresses, if not available, use non-filtered one
// this handles case of being part of Kubernetes cluster and not being part of it as well
nodeAddresses, err := safe.ReaderGetByID[*network.NodeAddress](ctx, r, network.FilteredNodeAddressID(network.NodeAddressRoutedID, k8s.NodeAddressFilterNoK8s))
if err != nil && !state.IsNotFoundError(err) {
return fmt.Errorf("error getting filtered node addresses: %w", err)
}
if nodeAddresses == nil {
nodeAddresses, err = safe.ReaderGetByID[*network.NodeAddress](ctx, r, network.NodeAddressRoutedID)
if err != nil && !state.IsNotFoundError(err) {
return fmt.Errorf("error getting node addresses: %w", err)
}
}
r.StartTrackingOutputs() r.StartTrackingOutputs()
if cfg != nil && !(cfg.Config().NetworkRules().DefaultAction() == nethelpers.DefaultActionAccept && cfg.Config().NetworkRules().Rules() == nil) { if cfg != nil && !(cfg.Config().NetworkRules().DefaultAction() == nethelpers.DefaultActionAccept && cfg.Config().NetworkRules().Rules() == nil) {
if err = safe.WriterModify(ctx, r, network.NewNfTablesChain(network.NamespaceName, IngressChainName), if err = safe.WriterModify(ctx, r, network.NewNfTablesChain(network.NamespaceName, IngressChainName), ctrl.buildIngressChain(cfg)); err != nil {
func(chain *network.NfTablesChain) error {
spec := chain.TypedSpec()
spec.Type = nethelpers.ChainTypeFilter
spec.Hook = nethelpers.ChainHookInput
spec.Priority = nethelpers.ChainPriorityMangle + 10
spec.Policy = nethelpers.VerdictAccept
// preamble
spec.Rules = []network.NfTablesRule{
// trusted interfaces: loopback, siderolink and kubespan
{
MatchIIfName: &network.NfTablesIfNameMatch{
InterfaceNames: []string{
"lo",
constants.SideroLinkName,
constants.KubeSpanLinkName,
},
Operator: nethelpers.OperatorEqual,
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictAccept),
},
}
defaultAction := cfg.Config().NetworkRules().DefaultAction()
if defaultAction == nethelpers.DefaultActionBlock {
spec.Policy = nethelpers.VerdictDrop
spec.Rules = append(spec.Rules,
// conntrack
network.NfTablesRule{
MatchConntrackState: &network.NfTablesConntrackStateMatch{
States: []nethelpers.ConntrackState{
nethelpers.ConntrackStateEstablished,
nethelpers.ConntrackStateRelated,
},
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictAccept),
},
network.NfTablesRule{
MatchConntrackState: &network.NfTablesConntrackStateMatch{
States: []nethelpers.ConntrackState{
nethelpers.ConntrackStateInvalid,
},
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictDrop),
},
// allow ICMP and ICMPv6 explicitly
network.NfTablesRule{
MatchLayer4: &network.NfTablesLayer4Match{
Protocol: nethelpers.ProtocolICMP,
},
MatchLimit: &network.NfTablesLimitMatch{
PacketRatePerSecond: 5,
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictAccept),
},
network.NfTablesRule{
MatchLayer4: &network.NfTablesLayer4Match{
Protocol: nethelpers.ProtocolICMPv6,
},
MatchLimit: &network.NfTablesLimitMatch{
PacketRatePerSecond: 5,
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictAccept),
},
)
if cfg.Config().Machine() != nil && cfg.Config().Cluster() != nil {
if cfg.Config().Machine().Features().HostDNS().ForwardKubeDNSToHost() {
hostDNSIP := netip.MustParseAddr(constants.HostDNSAddress)
// allow traffic to host DNS
for _, protocol := range []nethelpers.Protocol{nethelpers.ProtocolUDP, nethelpers.ProtocolTCP} {
spec.Rules = append(spec.Rules,
network.NfTablesRule{
MatchSourceAddress: &network.NfTablesAddressMatch{
IncludeSubnets: xslices.Map(
slices.Concat(
cfg.Config().Cluster().Network().PodCIDRs(),
cfg.Config().Cluster().Network().ServiceCIDRs(),
),
netip.MustParsePrefix,
),
},
MatchDestinationAddress: &network.NfTablesAddressMatch{
IncludeSubnets: []netip.Prefix{netip.PrefixFrom(hostDNSIP, hostDNSIP.BitLen())},
},
MatchLayer4: &network.NfTablesLayer4Match{
Protocol: protocol,
MatchDestinationPort: &network.NfTablesPortMatch{
Ranges: []network.PortRange{{Lo: 53, Hi: 53}},
},
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictAccept),
},
)
}
}
}
if cfg.Config().Cluster() != nil {
spec.Rules = append(spec.Rules,
// allow Kubernetes pod/service traffic
network.NfTablesRule{
MatchSourceAddress: &network.NfTablesAddressMatch{
IncludeSubnets: xslices.Map(
slices.Concat(cfg.Config().Cluster().Network().PodCIDRs(), cfg.Config().Cluster().Network().ServiceCIDRs()),
netip.MustParsePrefix,
),
},
MatchDestinationAddress: &network.NfTablesAddressMatch{
IncludeSubnets: xslices.Map(
slices.Concat(cfg.Config().Cluster().Network().PodCIDRs(), cfg.Config().Cluster().Network().ServiceCIDRs()),
netip.MustParsePrefix,
),
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictAccept),
},
)
}
}
for _, rule := range cfg.Config().NetworkRules().Rules() {
portRanges := rule.PortRanges()
// sort port ranges, machine config validation ensures that there are no overlaps
slices.SortFunc(portRanges, func(a, b [2]uint16) int {
return cmp.Compare(a[0], b[0])
})
// if default accept, drop anything that doesn't match the rule
verdict := nethelpers.VerdictDrop
if defaultAction == nethelpers.DefaultActionBlock {
verdict = nethelpers.VerdictAccept
}
spec.Rules = append(spec.Rules,
network.NfTablesRule{
MatchSourceAddress: &network.NfTablesAddressMatch{
IncludeSubnets: rule.Subnets(),
ExcludeSubnets: rule.ExceptSubnets(),
Invert: defaultAction == nethelpers.DefaultActionAccept,
},
MatchLayer4: &network.NfTablesLayer4Match{
Protocol: rule.Protocol(),
MatchDestinationPort: &network.NfTablesPortMatch{
Ranges: xslices.Map(portRanges, func(pr [2]uint16) network.PortRange {
return network.PortRange{Lo: pr[0], Hi: pr[1]}
}),
},
},
AnonCounter: true,
Verdict: pointer.To(verdict),
},
)
}
return nil
}); err != nil {
return err return err
} }
if nodeAddresses != nil {
if err = safe.WriterModify(ctx, r, network.NewNfTablesChain(network.NamespaceName, PreroutingChainName), ctrl.buildPreroutingChain(cfg, nodeAddresses)); err != nil {
return err
}
}
} }
if err = safe.CleanupOutputs[*network.NfTablesChain](ctx, r); err != nil { if err = safe.CleanupOutputs[*network.NfTablesChain](ctx, r); err != nil {
@ -256,3 +116,297 @@ func (ctrl *NfTablesChainConfigController) Run(ctx context.Context, r controller
} }
} }
} }
// buildIngressChain returns a modify function which populates the ingress chain spec:
// a filter chain on the input hook implementing the machine-configured network rules.
//
// Rule order matters in nftables: trusted-interface accept comes first, then (in
// default-block mode) conntrack/ICMP/cluster-traffic accepts, then per-rule verdicts.
func (ctrl *NfTablesChainConfigController) buildIngressChain(cfg *config.MachineConfig) func(*network.NfTablesChain) error {
	return func(chain *network.NfTablesChain) error {
		spec := chain.TypedSpec()

		spec.Type = nethelpers.ChainTypeFilter
		spec.Hook = nethelpers.ChainHookInput
		// run after mangle-priority chains so other input-hook processing sees the packet first
		spec.Priority = nethelpers.ChainPriorityMangle + 10
		// default policy is accept; flipped to drop below when the configured default action is "block"
		spec.Policy = nethelpers.VerdictAccept

		// preamble
		spec.Rules = []network.NfTablesRule{
			// trusted interfaces: loopback, siderolink and kubespan
			{
				MatchIIfName: &network.NfTablesIfNameMatch{
					InterfaceNames: []string{
						"lo",
						constants.SideroLinkName,
						constants.KubeSpanLinkName,
					},
					Operator: nethelpers.OperatorEqual,
				},
				AnonCounter: true,
				Verdict:     pointer.To(nethelpers.VerdictAccept),
			},
		}

		defaultAction := cfg.Config().NetworkRules().DefaultAction()

		if defaultAction == nethelpers.DefaultActionBlock {
			spec.Policy = nethelpers.VerdictDrop

			spec.Rules = append(spec.Rules,
				// conntrack: accept established/related, drop invalid
				network.NfTablesRule{
					MatchConntrackState: &network.NfTablesConntrackStateMatch{
						States: []nethelpers.ConntrackState{
							nethelpers.ConntrackStateEstablished,
							nethelpers.ConntrackStateRelated,
						},
					},
					AnonCounter: true,
					Verdict:     pointer.To(nethelpers.VerdictAccept),
				},
				network.NfTablesRule{
					MatchConntrackState: &network.NfTablesConntrackStateMatch{
						States: []nethelpers.ConntrackState{
							nethelpers.ConntrackStateInvalid,
						},
					},
					AnonCounter: true,
					Verdict:     pointer.To(nethelpers.VerdictDrop),
				},
				// allow ICMP and ICMPv6 explicitly (rate-limited to 5 packets/second)
				network.NfTablesRule{
					MatchLayer4: &network.NfTablesLayer4Match{
						Protocol: nethelpers.ProtocolICMP,
					},
					MatchLimit: &network.NfTablesLimitMatch{
						PacketRatePerSecond: 5,
					},
					AnonCounter: true,
					Verdict:     pointer.To(nethelpers.VerdictAccept),
				},
				network.NfTablesRule{
					MatchLayer4: &network.NfTablesLayer4Match{
						Protocol: nethelpers.ProtocolICMPv6,
					},
					MatchLimit: &network.NfTablesLimitMatch{
						PacketRatePerSecond: 5,
					},
					AnonCounter: true,
					Verdict:     pointer.To(nethelpers.VerdictAccept),
				},
			)

			if cfg.Config().Machine() != nil && cfg.Config().Cluster() != nil {
				if cfg.Config().Machine().Features().HostDNS().ForwardKubeDNSToHost() {
					hostDNSIP := netip.MustParseAddr(constants.HostDNSAddress)

					// allow traffic to host DNS (port 53) from pod/service CIDRs, both UDP and TCP
					for _, protocol := range []nethelpers.Protocol{nethelpers.ProtocolUDP, nethelpers.ProtocolTCP} {
						spec.Rules = append(spec.Rules,
							network.NfTablesRule{
								MatchSourceAddress: &network.NfTablesAddressMatch{
									IncludeSubnets: xslices.Map(
										slices.Concat(
											cfg.Config().Cluster().Network().PodCIDRs(),
											cfg.Config().Cluster().Network().ServiceCIDRs(),
										),
										netip.MustParsePrefix,
									),
								},
								MatchDestinationAddress: &network.NfTablesAddressMatch{
									// single-address prefix matching exactly the host DNS IP
									IncludeSubnets: []netip.Prefix{netip.PrefixFrom(hostDNSIP, hostDNSIP.BitLen())},
								},
								MatchLayer4: &network.NfTablesLayer4Match{
									Protocol: protocol,
									MatchDestinationPort: &network.NfTablesPortMatch{
										Ranges: []network.PortRange{{Lo: 53, Hi: 53}},
									},
								},
								AnonCounter: true,
								Verdict:     pointer.To(nethelpers.VerdictAccept),
							},
						)
					}
				}
			}

			if cfg.Config().Cluster() != nil {
				spec.Rules = append(spec.Rules,
					// allow Kubernetes pod/service traffic
					network.NfTablesRule{
						MatchSourceAddress: &network.NfTablesAddressMatch{
							IncludeSubnets: xslices.Map(
								slices.Concat(cfg.Config().Cluster().Network().PodCIDRs(), cfg.Config().Cluster().Network().ServiceCIDRs()),
								netip.MustParsePrefix,
							),
						},
						MatchDestinationAddress: &network.NfTablesAddressMatch{
							IncludeSubnets: xslices.Map(
								slices.Concat(cfg.Config().Cluster().Network().PodCIDRs(), cfg.Config().Cluster().Network().ServiceCIDRs()),
								netip.MustParsePrefix,
							),
						},
						AnonCounter: true,
						Verdict:     pointer.To(nethelpers.VerdictAccept),
					},
				)
			}
		}

		// translate each machine-config rule into an nftables rule
		for _, rule := range cfg.Config().NetworkRules().Rules() {
			portRanges := rule.PortRanges()

			// sort port ranges, machine config validation ensures that there are no overlaps
			slices.SortFunc(portRanges, func(a, b [2]uint16) int {
				return cmp.Compare(a[0], b[0])
			})

			// if default accept, drop anything that doesn't match the rule (inverted source match);
			// if default block, accept what matches the rule
			verdict := nethelpers.VerdictDrop

			if defaultAction == nethelpers.DefaultActionBlock {
				verdict = nethelpers.VerdictAccept
			}

			spec.Rules = append(spec.Rules,
				network.NfTablesRule{
					MatchSourceAddress: &network.NfTablesAddressMatch{
						IncludeSubnets: rule.Subnets(),
						ExcludeSubnets: rule.ExceptSubnets(),
						Invert:         defaultAction == nethelpers.DefaultActionAccept,
					},
					MatchLayer4: &network.NfTablesLayer4Match{
						Protocol: rule.Protocol(),
						MatchDestinationPort: &network.NfTablesPortMatch{
							Ranges: xslices.Map(portRanges, func(pr [2]uint16) network.PortRange {
								return network.PortRange{Lo: pr[0], Hi: pr[1]}
							}),
						},
					},
					AnonCounter: true,
					Verdict:     pointer.To(verdict),
				},
			)
		}

		return nil
	}
}
// buildPreroutingChain returns a modify function which populates the prerouting chain spec.
//
// The prerouting chain runs at priority NATDest-10, i.e. before kube-proxy's DNAT chain
// (at dstnat priority) rewrites NodePort traffic, so the firewall can drop such packets
// while they still carry the node's own destination address. Only traffic addressed to
// this node's own addresses (from nodeAddresses) is filtered; everything else is accepted.
func (ctrl *NfTablesChainConfigController) buildPreroutingChain(cfg *config.MachineConfig, nodeAddresses *network.NodeAddress) func(*network.NfTablesChain) error {
	// convert CIDRs to /32 (/128) prefixes matching only the address itself
	myAddresses := xslices.Map(nodeAddresses.TypedSpec().Addresses,
		func(addr netip.Prefix) netip.Prefix {
			return netip.PrefixFrom(addr.Addr(), addr.Addr().BitLen())
		},
	)

	return func(chain *network.NfTablesChain) error {
		spec := chain.TypedSpec()

		spec.Type = nethelpers.ChainTypeFilter
		spec.Hook = nethelpers.ChainHookPrerouting
		// execute before destination-NAT chains (e.g. kube-proxy's NodePort DNAT)
		spec.Priority = nethelpers.ChainPriorityNATDest - 10
		spec.Policy = nethelpers.VerdictAccept

		defaultAction := cfg.Config().NetworkRules().DefaultAction()

		// preamble
		spec.Rules = []network.NfTablesRule{
			// trusted interfaces: loopback, siderolink and kubespan
			{
				MatchIIfName: &network.NfTablesIfNameMatch{
					InterfaceNames: []string{
						"lo",
						constants.SideroLinkName,
						constants.KubeSpanLinkName,
					},
					Operator: nethelpers.OperatorEqual,
				},
				AnonCounter: true,
				Verdict:     pointer.To(nethelpers.VerdictAccept),
			},
		}

		// if the traffic is not addressed to the machine, ignore (accept it)
		spec.Rules = append(spec.Rules,
			network.NfTablesRule{
				MatchDestinationAddress: &network.NfTablesAddressMatch{
					IncludeSubnets: myAddresses,
					Invert:         true,
				},
				AnonCounter: true,
				Verdict:     pointer.To(nethelpers.VerdictAccept),
			},
		)

		// drop any 'new' connections to ports outside of the allowed ranges;
		// only 'new' conntrack state is matched so established flows are untouched
		for _, rule := range cfg.Config().NetworkRules().Rules() {
			portRanges := rule.PortRanges()

			// sort port ranges, machine config validation ensures that there are no overlaps
			slices.SortFunc(portRanges, func(a, b [2]uint16) int {
				return cmp.Compare(a[0], b[0])
			})

			// if default accept, drop anything that doesn't match the rule (inverted source match);
			// if default block, accept what matches the rule
			verdict := nethelpers.VerdictDrop

			if defaultAction == nethelpers.DefaultActionBlock {
				verdict = nethelpers.VerdictAccept
			}

			spec.Rules = append(spec.Rules,
				network.NfTablesRule{
					MatchConntrackState: &network.NfTablesConntrackStateMatch{
						States: []nethelpers.ConntrackState{
							nethelpers.ConntrackStateNew,
						},
					},
					MatchSourceAddress: &network.NfTablesAddressMatch{
						IncludeSubnets: rule.Subnets(),
						ExcludeSubnets: rule.ExceptSubnets(),
						Invert:         defaultAction == nethelpers.DefaultActionAccept,
					},
					MatchLayer4: &network.NfTablesLayer4Match{
						Protocol: rule.Protocol(),
						MatchDestinationPort: &network.NfTablesPortMatch{
							Ranges: xslices.Map(portRanges, func(pr [2]uint16) network.PortRange {
								return network.PortRange{Lo: pr[0], Hi: pr[1]}
							}),
						},
					},
					AnonCounter: true,
					Verdict:     pointer.To(verdict),
				},
			)
		}

		if defaultAction == nethelpers.DefaultActionBlock {
			// drop any TCP/UDP new connections (everything not explicitly accepted above)
			spec.Rules = append(spec.Rules,
				network.NfTablesRule{
					MatchConntrackState: &network.NfTablesConntrackStateMatch{
						States: []nethelpers.ConntrackState{
							nethelpers.ConntrackStateNew,
						},
					},
					MatchLayer4: &network.NfTablesLayer4Match{
						Protocol: nethelpers.ProtocolTCP,
					},
					AnonCounter: true,
					Verdict:     pointer.To(nethelpers.VerdictDrop),
				},
				network.NfTablesRule{
					MatchConntrackState: &network.NfTablesConntrackStateMatch{
						States: []nethelpers.ConntrackState{
							nethelpers.ConntrackStateNew,
						},
					},
					MatchLayer4: &network.NfTablesLayer4Match{
						Protocol: nethelpers.ProtocolUDP,
					},
					AnonCounter: true,
					Verdict:     pointer.To(nethelpers.VerdictDrop),
				},
			)
		}

		return nil
	}
}

View File

@ -75,7 +75,11 @@ func (suite *NfTablesChainConfigTestSuite) injectConfig(block bool) {
cfg, err := container.New(configs...) cfg, err := container.New(configs...)
suite.Require().NoError(err) suite.Require().NoError(err)
suite.Require().NoError(suite.State().Create(suite.Ctx(), config.NewMachineConfig(cfg))) suite.Create(config.NewMachineConfig(cfg))
nodeAddresses := network.NewNodeAddress(network.NamespaceName, network.NodeAddressRoutedID)
nodeAddresses.TypedSpec().Addresses = []netip.Prefix{netip.MustParsePrefix("10.3.4.5/24")}
suite.Create(nodeAddresses)
} }
func (suite *NfTablesChainConfigTestSuite) TestDefaultAccept() { func (suite *NfTablesChainConfigTestSuite) TestDefaultAccept() {
@ -154,6 +158,98 @@ func (suite *NfTablesChainConfigTestSuite) TestDefaultAccept() {
}, },
spec.Rules) spec.Rules)
}) })
ctest.AssertResource(suite, netctrl.PreroutingChainName, func(chain *network.NfTablesChain, asrt *assert.Assertions) {
spec := chain.TypedSpec()
asrt.Equal(nethelpers.ChainTypeFilter, spec.Type)
asrt.Equal(nethelpers.ChainPriorityNATDest-10, spec.Priority)
asrt.Equal(nethelpers.ChainHookPrerouting, spec.Hook)
asrt.Equal(nethelpers.VerdictAccept, spec.Policy)
asrt.Equal(
[]network.NfTablesRule{
{
MatchIIfName: &network.NfTablesIfNameMatch{
InterfaceNames: []string{
"lo",
constants.SideroLinkName,
constants.KubeSpanLinkName,
},
Operator: nethelpers.OperatorEqual,
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictAccept),
},
{
MatchDestinationAddress: &network.NfTablesAddressMatch{
IncludeSubnets: []netip.Prefix{
netip.MustParsePrefix("10.3.4.5/32"),
},
Invert: true,
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictAccept),
},
{
MatchConntrackState: &network.NfTablesConntrackStateMatch{
States: []nethelpers.ConntrackState{
nethelpers.ConntrackStateNew,
},
},
MatchSourceAddress: &network.NfTablesAddressMatch{
IncludeSubnets: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/8"),
netip.MustParsePrefix("192.168.0.0/16"),
},
ExcludeSubnets: []netip.Prefix{
netip.MustParsePrefix("10.3.0.0/16"),
},
Invert: true,
},
MatchLayer4: &network.NfTablesLayer4Match{
Protocol: nethelpers.ProtocolTCP,
MatchDestinationPort: &network.NfTablesPortMatch{
Ranges: []network.PortRange{
{
Lo: 10250,
Hi: 10250,
},
},
},
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictDrop),
},
{
MatchConntrackState: &network.NfTablesConntrackStateMatch{
States: []nethelpers.ConntrackState{
nethelpers.ConntrackStateNew,
},
},
MatchSourceAddress: &network.NfTablesAddressMatch{
IncludeSubnets: []netip.Prefix{
netip.MustParsePrefix("0.0.0.0/0"),
},
Invert: true,
},
MatchLayer4: &network.NfTablesLayer4Match{
Protocol: nethelpers.ProtocolTCP,
MatchDestinationPort: &network.NfTablesPortMatch{
Ranges: []network.PortRange{
{
Lo: 50000,
Hi: 50000,
},
},
},
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictDrop),
},
},
spec.Rules)
})
} }
func (suite *NfTablesChainConfigTestSuite) TestDefaultBlock() { func (suite *NfTablesChainConfigTestSuite) TestDefaultBlock() {
@ -269,9 +365,125 @@ func (suite *NfTablesChainConfigTestSuite) TestDefaultBlock() {
}, },
spec.Rules) spec.Rules)
}) })
ctest.AssertResource(suite, netctrl.PreroutingChainName, func(chain *network.NfTablesChain, asrt *assert.Assertions) {
spec := chain.TypedSpec()
asrt.Equal(nethelpers.ChainTypeFilter, spec.Type)
asrt.Equal(nethelpers.ChainPriorityNATDest-10, spec.Priority)
asrt.Equal(nethelpers.ChainHookPrerouting, spec.Hook)
asrt.Equal(nethelpers.VerdictAccept, spec.Policy)
asrt.Equal(
[]network.NfTablesRule{
{
MatchIIfName: &network.NfTablesIfNameMatch{
InterfaceNames: []string{
"lo",
constants.SideroLinkName,
constants.KubeSpanLinkName,
},
Operator: nethelpers.OperatorEqual,
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictAccept),
},
{
MatchDestinationAddress: &network.NfTablesAddressMatch{
IncludeSubnets: []netip.Prefix{
netip.MustParsePrefix("10.3.4.5/32"),
},
Invert: true,
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictAccept),
},
{
MatchConntrackState: &network.NfTablesConntrackStateMatch{
States: []nethelpers.ConntrackState{
nethelpers.ConntrackStateNew,
},
},
MatchSourceAddress: &network.NfTablesAddressMatch{
IncludeSubnets: []netip.Prefix{
netip.MustParsePrefix("10.0.0.0/8"),
netip.MustParsePrefix("192.168.0.0/16"),
},
ExcludeSubnets: []netip.Prefix{
netip.MustParsePrefix("10.3.0.0/16"),
},
},
MatchLayer4: &network.NfTablesLayer4Match{
Protocol: nethelpers.ProtocolTCP,
MatchDestinationPort: &network.NfTablesPortMatch{
Ranges: []network.PortRange{
{
Lo: 10250,
Hi: 10250,
},
},
},
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictAccept),
},
{
MatchConntrackState: &network.NfTablesConntrackStateMatch{
States: []nethelpers.ConntrackState{
nethelpers.ConntrackStateNew,
},
},
MatchSourceAddress: &network.NfTablesAddressMatch{
IncludeSubnets: []netip.Prefix{
netip.MustParsePrefix("0.0.0.0/0"),
},
},
MatchLayer4: &network.NfTablesLayer4Match{
Protocol: nethelpers.ProtocolTCP,
MatchDestinationPort: &network.NfTablesPortMatch{
Ranges: []network.PortRange{
{
Lo: 50000,
Hi: 50000,
},
},
},
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictAccept),
},
{
MatchConntrackState: &network.NfTablesConntrackStateMatch{
States: []nethelpers.ConntrackState{
nethelpers.ConntrackStateNew,
},
},
MatchLayer4: &network.NfTablesLayer4Match{
Protocol: nethelpers.ProtocolTCP,
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictDrop),
},
{
MatchConntrackState: &network.NfTablesConntrackStateMatch{
States: []nethelpers.ConntrackState{
nethelpers.ConntrackStateNew,
},
},
MatchLayer4: &network.NfTablesLayer4Match{
Protocol: nethelpers.ProtocolUDP,
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictDrop),
},
},
spec.Rules)
})
} }
func TestNfTablesChainConfig(t *testing.T) { func TestNfTablesChainConfig(t *testing.T) {
t.Parallel()
suite.Run(t, &NfTablesChainConfigTestSuite{ suite.Run(t, &NfTablesChainConfigTestSuite{
DefaultSuite: ctest.DefaultSuite{ DefaultSuite: ctest.DefaultSuite{
Timeout: 5 * time.Second, Timeout: 5 * time.Second,

View File

@ -9,27 +9,31 @@ package api
import ( import (
"context" "context"
"crypto/tls" "crypto/tls"
_ "embed"
"errors" "errors"
"fmt" "fmt"
"net" "net"
"net/http" "net/http"
"os" "os"
"strconv" "strconv"
"strings"
"time" "time"
"github.com/cosi-project/runtime/pkg/safe" "github.com/cosi-project/runtime/pkg/safe"
"github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-cleanhttp"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/siderolabs/talos/internal/integration/base" "github.com/siderolabs/talos/internal/integration/base"
"github.com/siderolabs/talos/pkg/machinery/client" "github.com/siderolabs/talos/pkg/machinery/client"
"github.com/siderolabs/talos/pkg/machinery/constants" "github.com/siderolabs/talos/pkg/machinery/constants"
"github.com/siderolabs/talos/pkg/machinery/nethelpers"
"github.com/siderolabs/talos/pkg/machinery/resources/network" "github.com/siderolabs/talos/pkg/machinery/resources/network"
) )
// FirewallSuite ... // FirewallSuite ...
type FirewallSuite struct { type FirewallSuite struct {
base.APISuite base.K8sSuite
ctx context.Context //nolint:containedctx ctx context.Context //nolint:containedctx
ctxCancel context.CancelFunc ctxCancel context.CancelFunc
@ -113,6 +117,92 @@ func (suite *FirewallSuite) TestKubeletAccess() {
suite.Require().NoError(eg.Wait()) suite.Require().NoError(eg.Wait())
} }
//go:embed testdata/nodeport.yaml
var nodePortServiceYAML []byte
// TestNodePortAccess verifies that without firewall NodePort is available, and not available otherwise.
//
//nolint:gocyclo
// TestNodePortAccess verifies that without firewall NodePort is available, and not available otherwise.
//
//nolint:gocyclo
func (suite *FirewallSuite) TestNodePortAccess() {
	nodes := suite.DiscoverNodeInternalIPs(suite.ctx)

	// Probe firewall state on the first node: the presence of the "ingress"
	// chain means the firewall is configured; a drop policy means default-block.
	ingressChain, chainErr := safe.StateGetByID[*network.NfTablesChain](client.WithNode(suite.ctx, nodes[0]), suite.Client.COSI, "ingress")

	defaultBlock := chainErr == nil && ingressChain.TypedSpec().Policy == nethelpers.VerdictDrop

	// our blocking only works with kube-proxy, so we need to make sure it's running
	pods, listErr := suite.Clientset.CoreV1().Pods("kube-system").List(suite.ctx, metav1.ListOptions{LabelSelector: "k8s-app=kube-proxy"})
	suite.Require().NoError(listErr)

	if len(pods.Items) == 0 {
		suite.T().Skip("kube-proxy not running")
	}

	// create a deployment with a NodePort service
	manifests := suite.ParseManifests(nodePortServiceYAML)

	suite.T().Cleanup(func() {
		cleanupCtx, cleanupCancel := context.WithTimeout(context.Background(), time.Minute)
		defer cleanupCancel()

		suite.DeleteManifests(cleanupCtx, manifests)
	})

	suite.ApplyManifests(suite.ctx, manifests)

	// read back the Service to figure out the allocated NodePort
	svc, svcErr := suite.Clientset.CoreV1().Services("default").Get(suite.ctx, "test-nginx", metav1.GetOptions{})
	suite.Require().NoError(svcErr)

	var nodePort int

	for _, portSpec := range svc.Spec.Ports {
		nodePort = int(portSpec.NodePort)
	}

	suite.Require().NotZero(nodePort)

	suite.T().Log("sleeping for 5 seconds to allow kube-proxy to update nftables")
	time.Sleep(5 * time.Second)

	eg, ctx := errgroup.WithContext(suite.ctx)

	// dial the NodePort on every node concurrently and check the outcome
	// matches the firewall mode
	for _, node := range nodes {
		eg.Go(func() error {
			attemptCtx, cancel := context.WithTimeout(ctx, time.Second)
			defer cancel()

			var dialer net.Dialer

			conn, dialErr := dialer.DialContext(attemptCtx, "tcp", net.JoinHostPort(node, strconv.Itoa(nodePort)))
			if conn != nil {
				conn.Close() //nolint:errcheck
			}

			switch {
			case defaultBlock:
				// firewall should silently drop the SYN, so the dial times out
				if dialErr == nil {
					return errors.New("nodePort API should not be available")
				}

				if !errors.Is(dialErr, os.ErrDeadlineExceeded) && !errors.Is(dialErr, context.DeadlineExceeded) {
					return fmt.Errorf("unexpected error: %w", dialErr)
				}
			case dialErr != nil:
				// ignore connection refused, as it's not firewall, but rather service proxy not ready yet
				if !strings.Contains(dialErr.Error(), "connection refused") {
					return fmt.Errorf("nodePort API should be available: %w", dialErr)
				}
			}

			return nil
		})
	}

	suite.Require().NoError(eg.Wait())
}
func init() { func init() {
allSuites = append(allSuites, new(FirewallSuite)) allSuites = append(allSuites, new(FirewallSuite))
} }

View File

@ -0,0 +1,59 @@
# Test fixture: an nginx Deployment exposed via a NodePort Service,
# used by the firewall integration test to verify NodePort reachability.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: test-nginx
  name: test-nginx
  namespace: default
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: test-nginx
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: test-nginx
    spec:
      containers:
        - image: nginx
          imagePullPolicy: Always
          name: nginx
          ports:
            - containerPort: 80
              protocol: TCP
          resources:
            limits:
              cpu: 100m
              memory: 128Mi
            requests:
              cpu: 100m
              memory: 128Mi
      restartPolicy: Always
      # short grace period to speed up test cleanup
      terminationGracePeriodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: test-nginx
  name: test-nginx
  namespace: default
spec:
  ipFamilies:
    - IPv4
  ipFamilyPolicy: SingleStack
  ports:
    - port: 80
      protocol: TCP
      targetPort: 80
  selector:
    app: test-nginx
  # NodePort is allocated by Kubernetes; the test reads it back from the Service
  type: NodePort