fix: host DNS access with firewall enabled

Explicitly enable access to host DNS from pod/service IPs.

Also fix the Kubernetes health checks to assert that the number of
ready pods matches the expected count; otherwise the check might skip
a pod (e.g. a `kube-proxy` one) which is not ready, allowing the test
to proceed too early.

Update DNS test to print more logs on error.

Signed-off-by: Andrey Smirnov <andrey.smirnov@siderolabs.com>
Author: Andrey Smirnov <andrey.smirnov@siderolabs.com>
Date:   2024-08-27 15:44:14 +04:00
Commit: a9551b7caa (parent: 4834a61a8e)

5 changed files with 83 additions and 15 deletions

@@ -152,6 +152,37 @@ func (ctrl *NfTablesChainConfigController) Run(ctx context.Context, r controller
 			},
 		)
 
+		if cfg.Config().Machine() != nil && cfg.Config().Cluster() != nil {
+			if cfg.Config().Machine().Features().HostDNS().ForwardKubeDNSToHost() {
+				hostDNSIP := netip.MustParseAddr(constants.HostDNSAddress)
+
+				// allow traffic to host DNS
+				for _, protocol := range []nethelpers.Protocol{nethelpers.ProtocolUDP, nethelpers.ProtocolTCP} {
+					spec.Rules = append(spec.Rules,
+						network.NfTablesRule{
+							MatchSourceAddress: &network.NfTablesAddressMatch{
+								IncludeSubnets: xslices.Map(
+									append(slices.Clone(cfg.Config().Cluster().Network().PodCIDRs()), cfg.Config().Cluster().Network().ServiceCIDRs()...),
+									netip.MustParsePrefix,
+								),
+							},
+							MatchDestinationAddress: &network.NfTablesAddressMatch{
+								IncludeSubnets: []netip.Prefix{netip.PrefixFrom(hostDNSIP, hostDNSIP.BitLen())},
+							},
+							MatchLayer4: &network.NfTablesLayer4Match{
+								Protocol: protocol,
+								MatchDestinationPort: &network.NfTablesPortMatch{
+									Ranges: []network.PortRange{{Lo: 53, Hi: 53}},
+								},
+							},
+							AnonCounter: true,
+							Verdict:     pointer.To(nethelpers.VerdictAccept),
+						},
+					)
+				}
+			}
+		}
+
 		if cfg.Config().Cluster() != nil {
 			spec.Rules = append(spec.Rules,
 				// allow Kubernetes pod/service traffic
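
Note on the destination match above: pairing netip.PrefixFrom with Addr.BitLen turns the single host DNS address into a host-only prefix (/32 for IPv4, /128 for IPv6), so the accept rule matches exactly that IP. A minimal standalone sketch of the idiom (the literal address is illustrative; the controller reads it from constants.HostDNSAddress):

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// Illustrative address only; the controller uses constants.HostDNSAddress.
	hostDNSIP := netip.MustParseAddr("169.254.116.108")

	// A prefix with the address's full bit length covers that one address
	// only, which is what the nftables destination match needs.
	hostOnly := netip.PrefixFrom(hostDNSIP, hostDNSIP.BitLen())

	fmt.Println(hostOnly) // prints 169.254.116.108/32
}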

@@ -174,6 +174,12 @@ func (suite *CommonSuite) TestDNSResolver() {
 	suite.Assert().Contains(stderr, "'index.html' saved")
 
 	if suite.T().Failed() {
+		suite.LogPodLogsByLabel(suite.ctx, "kube-system", "k8s-app", "kube-dns")
+
+		for _, node := range suite.DiscoverNodeInternalIPs(suite.ctx) {
+			suite.DumpLogs(suite.ctx, node, "dns-resolve-cache", "google")
+		}
+
 		suite.T().FailNow()
 	}

@@ -231,6 +231,21 @@ func (k8sSuite *K8sSuite) WaitForPodToBeRunning(ctx context.Context, timeout tim
 	}
 }
 
+// LogPodLogsByLabel logs the logs of the pod with the given namespace and label.
+func (k8sSuite *K8sSuite) LogPodLogsByLabel(ctx context.Context, namespace, label, value string) {
+	ctx, cancel := context.WithTimeout(ctx, time.Minute)
+	defer cancel()
+
+	podList, err := k8sSuite.Clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
+		LabelSelector: fmt.Sprintf("%s=%s", label, value),
+	})
+	k8sSuite.Require().NoError(err)
+
+	for _, pod := range podList.Items {
+		k8sSuite.LogPodLogs(ctx, namespace, pod.Name)
+	}
+}
+
 // LogPodLogs logs the logs of the pod with the given namespace and name.
 func (k8sSuite *K8sSuite) LogPodLogs(ctx context.Context, namespace, podName string) {
 	ctx, cancel := context.WithTimeout(ctx, time.Minute)
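
The body of LogPodLogs is truncated in this view. A hedged sketch of how per-pod logs are typically fetched with client-go (the helper name and options here are assumptions, not the suite's actual code):

package base

import (
	"context"
	"io"
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// dumpPodLogs is a hypothetical helper illustrating the client-go calls a
// LogPodLogs-style method would rely on: request the pod's log stream and
// copy it to the test output.
func dumpPodLogs(ctx context.Context, clientset *kubernetes.Clientset, namespace, podName string) error {
	req := clientset.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{})

	stream, err := req.Stream(ctx)
	if err != nil {
		return err
	}

	defer stream.Close() //nolint:errcheck

	_, err = io.Copy(os.Stdout, stream)

	return err
}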

@@ -29,7 +29,7 @@ func DefaultClusterChecks() []ClusterCheck {
 		// wait for kube-proxy to report ready
 		func(cluster ClusterInfo) conditions.Condition {
 			return conditions.PollingCondition("kube-proxy to report ready", func(ctx context.Context) error {
-				present, err := DaemonSetPresent(ctx, cluster, "kube-system", "k8s-app=kube-proxy")
+				present, replicas, err := DaemonSetPresent(ctx, cluster, "kube-system", "k8s-app=kube-proxy")
 				if err != nil {
 					return err
 				}
@@ -38,14 +38,14 @@ func DefaultClusterChecks() []ClusterCheck {
 					return conditions.ErrSkipAssertion
 				}
 
-				return K8sPodReadyAssertion(ctx, cluster, "kube-system", "k8s-app=kube-proxy")
-			}, 3*time.Minute, 5*time.Second)
+				return K8sPodReadyAssertion(ctx, cluster, replicas, "kube-system", "k8s-app=kube-proxy")
+			}, 5*time.Minute, 5*time.Second)
 		},
 		// wait for coredns to report ready
 		func(cluster ClusterInfo) conditions.Condition {
 			return conditions.PollingCondition("coredns to report ready", func(ctx context.Context) error {
-				present, err := ReplicaSetPresent(ctx, cluster, "kube-system", "k8s-app=kube-dns")
+				present, replicas, err := ReplicaSetPresent(ctx, cluster, "kube-system", "k8s-app=kube-dns")
 				if err != nil {
 					return err
 				}
@@ -54,8 +54,8 @@ func DefaultClusterChecks() []ClusterCheck {
 					return conditions.ErrSkipAssertion
 				}
 
-				return K8sPodReadyAssertion(ctx, cluster, "kube-system", "k8s-app=kube-dns")
-			}, 3*time.Minute, 5*time.Second)
+				return K8sPodReadyAssertion(ctx, cluster, replicas, "kube-system", "k8s-app=kube-dns")
+			}, 5*time.Minute, 5*time.Second)
 		},
 		// wait for all the nodes to be schedulable

@@ -274,7 +274,7 @@ func K8sAllNodesSchedulableAssertion(ctx context.Context, cluster cluster.K8sPro
 // K8sPodReadyAssertion checks whether all the pods matching label selector are Ready, and there is at least one.
 //
 //nolint:gocyclo
-func K8sPodReadyAssertion(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) error {
+func K8sPodReadyAssertion(ctx context.Context, cluster cluster.K8sProvider, replicas int, namespace, labelSelector string) error {
 	clientset, err := cluster.K8sClient(ctx)
 	if err != nil {
 		return err
@@ -345,6 +345,10 @@ func K8sPodReadyAssertion(ctx context.Context, cluster cluster.K8sProvider, name
 	}
 
 	if len(notReadyPods) == 0 {
+		if len(readyPods) != replicas {
+			return fmt.Errorf("expected %d ready pods, got %d", replicas, len(readyPods))
+		}
+
 		return nil
 	}
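
For context, the new assertion boils down to comparing the number of pods whose PodReady condition is True against the replica count reported by the owning DaemonSet/ReplicaSet. A standalone sketch of that counting logic (a hypothetical helper, not the function above):

package check

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// readyCountMatches is a hypothetical helper: a pod counts as ready when its
// PodReady condition is True, and the total must equal the expected replica
// count, so a not-yet-created or not-yet-ready pod cannot be silently skipped.
func readyCountMatches(pods []corev1.Pod, replicas int) error {
	ready := 0

	for _, pod := range pods {
		for _, cond := range pod.Status.Conditions {
			if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
				ready++

				break
			}
		}
	}

	if ready != replicas {
		return fmt.Errorf("expected %d ready pods, got %d", replicas, ready)
	}

	return nil
}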
@@ -352,37 +356,49 @@ func K8sPodReadyAssertion(ctx context.Context, cluster cluster.K8sProvider, name
 }
 
 // DaemonSetPresent returns true if there is at least one DaemonSet matching given label selector.
-func DaemonSetPresent(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) (bool, error) {
+//
+//nolint:dupl
+func DaemonSetPresent(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) (bool, int, error) {
 	clientset, err := cluster.K8sClient(ctx)
 	if err != nil {
-		return false, err
+		return false, 0, err
 	}
 
 	dss, err := clientset.AppsV1().DaemonSets(namespace).List(ctx, metav1.ListOptions{
 		LabelSelector: labelSelector,
 	})
 	if err != nil {
-		return false, err
+		return false, 0, err
 	}
 
-	return len(dss.Items) > 0, nil
+	if len(dss.Items) == 0 {
+		return false, 0, nil
+	}
+
+	return true, int(dss.Items[0].Status.DesiredNumberScheduled), nil
 }
 
 // ReplicaSetPresent returns true if there is at least one ReplicaSet matching given label selector.
-func ReplicaSetPresent(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) (bool, error) {
+//
+//nolint:dupl
+func ReplicaSetPresent(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) (bool, int, error) {
 	clientset, err := cluster.K8sClient(ctx)
 	if err != nil {
-		return false, err
+		return false, 0, err
 	}
 
 	rss, err := clientset.AppsV1().ReplicaSets(namespace).List(ctx, metav1.ListOptions{
 		LabelSelector: labelSelector,
 	})
 	if err != nil {
-		return false, err
+		return false, 0, err
 	}
 
-	return len(rss.Items) > 0, nil
+	if len(rss.Items) == 0 {
+		return false, 0, nil
+	}
+
+	return true, int(rss.Items[0].Status.Replicas), nil
 }
 
 // K8sControlPlaneStaticPods checks whether all the controlplane nodes are running required Kubernetes static pods.