diff --git a/internal/app/machined/pkg/controllers/network/nftables_chain_config.go b/internal/app/machined/pkg/controllers/network/nftables_chain_config.go
index 4075db30f..116bce899 100644
--- a/internal/app/machined/pkg/controllers/network/nftables_chain_config.go
+++ b/internal/app/machined/pkg/controllers/network/nftables_chain_config.go
@@ -152,6 +152,37 @@ func (ctrl *NfTablesChainConfigController) Run(ctx context.Context, r controller
 				},
 			)
 
+			if cfg.Config().Machine() != nil && cfg.Config().Cluster() != nil {
+				if cfg.Config().Machine().Features().HostDNS().ForwardKubeDNSToHost() {
+					hostDNSIP := netip.MustParseAddr(constants.HostDNSAddress)
+
+					// allow traffic to host DNS
+					for _, protocol := range []nethelpers.Protocol{nethelpers.ProtocolUDP, nethelpers.ProtocolTCP} {
+						spec.Rules = append(spec.Rules,
+							network.NfTablesRule{
+								MatchSourceAddress: &network.NfTablesAddressMatch{
+									IncludeSubnets: xslices.Map(
+										append(slices.Clone(cfg.Config().Cluster().Network().PodCIDRs()), cfg.Config().Cluster().Network().ServiceCIDRs()...),
+										netip.MustParsePrefix,
+									),
+								},
+								MatchDestinationAddress: &network.NfTablesAddressMatch{
+									IncludeSubnets: []netip.Prefix{netip.PrefixFrom(hostDNSIP, hostDNSIP.BitLen())},
+								},
+								MatchLayer4: &network.NfTablesLayer4Match{
+									Protocol: protocol,
+									MatchDestinationPort: &network.NfTablesPortMatch{
+										Ranges: []network.PortRange{{Lo: 53, Hi: 53}},
+									},
+								},
+								AnonCounter: true,
+								Verdict:     pointer.To(nethelpers.VerdictAccept),
+							},
+						)
+					}
+				}
+			}
+
 			if cfg.Config().Cluster() != nil {
 				spec.Rules = append(spec.Rules,
 					// allow Kubernetes pod/service traffic
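Note (reviewer sketch, not part of the patch): the destination match above accepts only the host DNS address itself, because netip.PrefixFrom(addr, addr.BitLen()) yields a host-only prefix (/32 for IPv4, /128 for IPv6). A minimal, runnable illustration, using a placeholder address rather than the real constants.HostDNSAddress:

	package main

	import (
		"fmt"
		"net/netip"
	)

	func main() {
		// Placeholder address standing in for constants.HostDNSAddress.
		hostDNSIP := netip.MustParseAddr("169.254.116.108")

		// BitLen() is 32 for IPv4 (128 for IPv6), so the prefix covers exactly one address.
		fmt.Println(netip.PrefixFrom(hostDNSIP, hostDNSIP.BitLen())) // prints 169.254.116.108/32
	}
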
diff --git a/internal/integration/api/common.go b/internal/integration/api/common.go
index 60dc436ce..f67854530 100644
--- a/internal/integration/api/common.go
+++ b/internal/integration/api/common.go
@@ -174,6 +174,12 @@ func (suite *CommonSuite) TestDNSResolver() {
 	suite.Assert().Contains(stderr, "'index.html' saved")
 
 	if suite.T().Failed() {
+		suite.LogPodLogsByLabel(suite.ctx, "kube-system", "k8s-app", "kube-dns")
+
+		for _, node := range suite.DiscoverNodeInternalIPs(suite.ctx) {
+			suite.DumpLogs(suite.ctx, node, "dns-resolve-cache", "google")
+		}
+
 		suite.T().FailNow()
 	}
 
diff --git a/internal/integration/base/k8s.go b/internal/integration/base/k8s.go
index 4b7a60b31..09161150f 100644
--- a/internal/integration/base/k8s.go
+++ b/internal/integration/base/k8s.go
@@ -231,6 +231,21 @@ func (k8sSuite *K8sSuite) WaitForPodToBeRunning(ctx context.Context, timeout tim
 	}
 }
 
+// LogPodLogsByLabel logs the logs of the pod with the given namespace and label.
+func (k8sSuite *K8sSuite) LogPodLogsByLabel(ctx context.Context, namespace, label, value string) {
+	ctx, cancel := context.WithTimeout(ctx, time.Minute)
+	defer cancel()
+
+	podList, err := k8sSuite.Clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
+		LabelSelector: fmt.Sprintf("%s=%s", label, value),
+	})
+	k8sSuite.Require().NoError(err)
+
+	for _, pod := range podList.Items {
+		k8sSuite.LogPodLogs(ctx, namespace, pod.Name)
+	}
+}
+
 // LogPodLogs logs the logs of the pod with the given namespace and name.
 func (k8sSuite *K8sSuite) LogPodLogs(ctx context.Context, namespace, podName string) {
 	ctx, cancel := context.WithTimeout(ctx, time.Minute)
diff --git a/pkg/cluster/check/default.go b/pkg/cluster/check/default.go
index e248b3143..e8d115372 100644
--- a/pkg/cluster/check/default.go
+++ b/pkg/cluster/check/default.go
@@ -29,7 +29,7 @@ func DefaultClusterChecks() []ClusterCheck {
 		// wait for kube-proxy to report ready
 		func(cluster ClusterInfo) conditions.Condition {
 			return conditions.PollingCondition("kube-proxy to report ready", func(ctx context.Context) error {
-				present, err := DaemonSetPresent(ctx, cluster, "kube-system", "k8s-app=kube-proxy")
+				present, replicas, err := DaemonSetPresent(ctx, cluster, "kube-system", "k8s-app=kube-proxy")
 				if err != nil {
 					return err
 				}
@@ -38,14 +38,14 @@ func DefaultClusterChecks() []ClusterCheck {
 					return conditions.ErrSkipAssertion
 				}
 
-				return K8sPodReadyAssertion(ctx, cluster, "kube-system", "k8s-app=kube-proxy")
-			}, 3*time.Minute, 5*time.Second)
+				return K8sPodReadyAssertion(ctx, cluster, replicas, "kube-system", "k8s-app=kube-proxy")
+			}, 5*time.Minute, 5*time.Second)
 		},
 
 		// wait for coredns to report ready
 		func(cluster ClusterInfo) conditions.Condition {
 			return conditions.PollingCondition("coredns to report ready", func(ctx context.Context) error {
-				present, err := ReplicaSetPresent(ctx, cluster, "kube-system", "k8s-app=kube-dns")
+				present, replicas, err := ReplicaSetPresent(ctx, cluster, "kube-system", "k8s-app=kube-dns")
 				if err != nil {
 					return err
 				}
@@ -54,8 +54,8 @@ func DefaultClusterChecks() []ClusterCheck {
 					return conditions.ErrSkipAssertion
 				}
 
-				return K8sPodReadyAssertion(ctx, cluster, "kube-system", "k8s-app=kube-dns")
-			}, 3*time.Minute, 5*time.Second)
+				return K8sPodReadyAssertion(ctx, cluster, replicas, "kube-system", "k8s-app=kube-dns")
+			}, 5*time.Minute, 5*time.Second)
 		},
 
 		// wait for all the nodes to be schedulable
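Note (illustrative sketch, not part of the patch): with the three-value returns, any additional readiness check follows the same pattern — skip when the workload is absent, otherwise thread the desired replica count into K8sPodReadyAssertion. For a hypothetical DaemonSet labelled k8s-app=example-agent it would look like:

	// wait for the (hypothetical) example-agent DaemonSet to report ready
	func(cluster ClusterInfo) conditions.Condition {
		return conditions.PollingCondition("example-agent to report ready", func(ctx context.Context) error {
			present, replicas, err := DaemonSetPresent(ctx, cluster, "kube-system", "k8s-app=example-agent")
			if err != nil {
				return err
			}

			if !present {
				return conditions.ErrSkipAssertion
			}

			return K8sPodReadyAssertion(ctx, cluster, replicas, "kube-system", "k8s-app=example-agent")
		}, 5*time.Minute, 5*time.Second)
	},
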
diff --git a/pkg/cluster/check/kubernetes.go b/pkg/cluster/check/kubernetes.go
index 8a0f86c74..97a8632fa 100644
--- a/pkg/cluster/check/kubernetes.go
+++ b/pkg/cluster/check/kubernetes.go
@@ -274,7 +274,7 @@ func K8sAllNodesSchedulableAssertion(ctx context.Context, cluster cluster.K8sPro
 // K8sPodReadyAssertion checks whether all the pods matching label selector are Ready, and there is at least one.
 //
 //nolint:gocyclo
-func K8sPodReadyAssertion(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) error {
+func K8sPodReadyAssertion(ctx context.Context, cluster cluster.K8sProvider, replicas int, namespace, labelSelector string) error {
 	clientset, err := cluster.K8sClient(ctx)
 	if err != nil {
 		return err
 	}
@@ -345,6 +345,10 @@ func K8sPodReadyAssertion(ctx context.Context, cluster cluster.K8sProvider, name
 	}
 
 	if len(notReadyPods) == 0 {
+		if len(readyPods) != replicas {
+			return fmt.Errorf("expected %d ready pods, got %d", replicas, len(readyPods))
+		}
+
 		return nil
 	}
 
@@ -352,37 +356,49 @@ func K8sPodReadyAssertion(ctx context.Context, cluster cluster.K8sProvider, name
 }
 
 // DaemonSetPresent returns true if there is at least one DaemonSet matching given label selector.
-func DaemonSetPresent(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) (bool, error) {
+//
+//nolint:dupl
+func DaemonSetPresent(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) (bool, int, error) {
 	clientset, err := cluster.K8sClient(ctx)
 	if err != nil {
-		return false, err
+		return false, 0, err
 	}
 
 	dss, err := clientset.AppsV1().DaemonSets(namespace).List(ctx, metav1.ListOptions{
 		LabelSelector: labelSelector,
 	})
 	if err != nil {
-		return false, err
+		return false, 0, err
 	}
 
-	return len(dss.Items) > 0, nil
+	if len(dss.Items) == 0 {
+		return false, 0, nil
+	}
+
+	return true, int(dss.Items[0].Status.DesiredNumberScheduled), nil
 }
 
 // ReplicaSetPresent returns true if there is at least one ReplicaSet matching given label selector.
-func ReplicaSetPresent(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) (bool, error) {
+//
+//nolint:dupl
+func ReplicaSetPresent(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) (bool, int, error) {
 	clientset, err := cluster.K8sClient(ctx)
 	if err != nil {
-		return false, err
+		return false, 0, err
 	}
 
 	rss, err := clientset.AppsV1().ReplicaSets(namespace).List(ctx, metav1.ListOptions{
 		LabelSelector: labelSelector,
 	})
 	if err != nil {
-		return false, err
+		return false, 0, err
 	}
 
-	return len(rss.Items) > 0, nil
+	if len(rss.Items) == 0 {
+		return false, 0, nil
+	}
+
+	return true, int(rss.Items[0].Status.Replicas), nil
 }
 
 // K8sControlPlaneStaticPods checks whether all the controlplane nodes are running required Kubernetes static pods.
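Note (standalone sketch, not part of the patch): the replica counts returned above come straight from the workload status objects — Status.DesiredNumberScheduled for a DaemonSet and Status.Replicas for a ReplicaSet. A minimal client-go example of reading the DaemonSet field, with a hypothetical helper name and an assumed pre-built clientset:

	package check // illustrative placement

	import (
		"context"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
	)

	// desiredDaemonSetPods is a hypothetical helper: it returns how many pods the
	// first DaemonSet matching labelSelector is expected to schedule, or 0 if none matches.
	func desiredDaemonSetPods(ctx context.Context, clientset *kubernetes.Clientset, namespace, labelSelector string) (int32, error) {
		dss, err := clientset.AppsV1().DaemonSets(namespace).List(ctx, metav1.ListOptions{
			LabelSelector: labelSelector,
		})
		if err != nil {
			return 0, err
		}

		if len(dss.Items) == 0 {
			return 0, nil
		}

		// DesiredNumberScheduled is the number of nodes that should be running the daemon pod.
		return dss.Items[0].Status.DesiredNumberScheduled, nil
	}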