From dc0ba9873b4d33e557d0d888181ff24e304ab6f4 Mon Sep 17 00:00:00 2001
From: iwilltry42
Date: Thu, 1 Oct 2020 21:09:12 +0200
Subject: [PATCH] patch CoreDNS configmap with host.k3d.internal

---
 pkg/cluster/cluster.go         | 76 ++++++++++++++++++++--------------
 pkg/runtimes/docker/network.go |  1 +
 2 files changed, 47 insertions(+), 30 deletions(-)

diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index 508bf515..a981c73f 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -44,10 +44,15 @@ import (
 // - some containerized k3s nodes
 // - a docker network
 func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
+	clusterCreateCtx := ctx
+	clusterPrepCtx := ctx
 	if cluster.CreateClusterOpts.Timeout > 0*time.Second {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, cluster.CreateClusterOpts.Timeout)
-		defer cancel()
+		var cancelClusterCreateCtx context.CancelFunc
+		var cancelClusterPrepCtx context.CancelFunc
+		clusterCreateCtx, cancelClusterCreateCtx = context.WithTimeout(ctx, cluster.CreateClusterOpts.Timeout)
+		clusterPrepCtx, cancelClusterPrepCtx = context.WithTimeout(ctx, cluster.CreateClusterOpts.Timeout)
+		defer cancelClusterCreateCtx()
+		defer cancelClusterPrepCtx()
 	}
 
 	/*
@@ -74,7 +79,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 	}
 
 	// create cluster network or use an existing one
-	networkID, networkExists, err := runtime.CreateNetworkIfNotPresent(ctx, cluster.Network.Name)
+	networkID, networkExists, err := runtime.CreateNetworkIfNotPresent(clusterCreateCtx, cluster.Network.Name)
 	if err != nil {
 		log.Errorln("Failed to create cluster network")
 		return err
@@ -102,7 +107,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 	 */
 	if !cluster.CreateClusterOpts.DisableImageVolume {
 		imageVolumeName := fmt.Sprintf("%s-%s-images", k3d.DefaultObjectNamePrefix, cluster.Name)
-		if err := runtime.CreateVolume(ctx, imageVolumeName, map[string]string{k3d.LabelClusterName: cluster.Name}); err != nil {
+		if err := runtime.CreateVolume(clusterCreateCtx, imageVolumeName, map[string]string{k3d.LabelClusterName: cluster.Name}); err != nil {
 			log.Errorf("Failed to create image volume '%s' for cluster '%s'", imageVolumeName, cluster.Name)
 			return err
 		}
@@ -157,7 +162,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 
 		// create node
 		log.Infof("Creating node '%s'", node.Name)
-		if err := NodeCreate(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
+		if err := NodeCreate(clusterCreateCtx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
 			log.Errorln("Failed to create node")
 			return err
 		}
@@ -189,13 +194,13 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 		// wait for the initnode to come up before doing anything else
 		for {
 			select {
-			case <-ctx.Done():
+			case <-clusterCreateCtx.Done():
 				log.Errorln("Failed to bring up initializing server node in time")
-				return fmt.Errorf(">>> %w", ctx.Err())
+				return fmt.Errorf(">>> %w", clusterCreateCtx.Err())
 			default:
 			}
 			log.Debugln("Waiting for initializing server node...")
-			logreader, err := runtime.GetNodeLogs(ctx, cluster.InitNode, time.Time{})
+			logreader, err := runtime.GetNodeLogs(clusterCreateCtx, cluster.InitNode, time.Time{})
 			if err != nil {
 				if logreader != nil {
 					logreader.Close()
@@ -219,7 +224,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 	}
 
 	// vars to support waiting for server nodes to be ready
-	waitForServerWaitgroup, ctx := errgroup.WithContext(ctx)
+	waitForServerWaitgroup, clusterCreateCtx := errgroup.WithContext(clusterCreateCtx)
 
 	// create all other nodes, but skip the init node
 	for _, node := range cluster.Nodes {
@@ -257,28 +262,11 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 				// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
 				// ... by scanning for this line in logs and restarting the container in case it appears
 				log.Debugf("Starting to wait for server node '%s'", serverNode.Name)
-				return NodeWaitForLogMessage(ctx, runtime, serverNode, k3d.ReadyLogMessageByRole[k3d.ServerRole], time.Time{})
+				return NodeWaitForLogMessage(clusterCreateCtx, runtime, serverNode, k3d.ReadyLogMessageByRole[k3d.ServerRole], time.Time{})
 			})
 		}
 	}
 
-	/*
-	 * Networking Magic
-	 */
-
-	// add extra host
-	hostIP, err := GetHostIP(ctx, runtime, cluster)
-	if err != nil {
-		return err
-	}
-	hostsEntry := fmt.Sprintf("%s %s", hostIP, k3d.DefaultK3dInternalHostRecord)
-	log.Debugf("Adding extra host entry '%s'", hostsEntry)
-	for _, node := range cluster.Nodes {
-		if err := runtime.ExecInNode(ctx, node, []string{"sh", "-c", fmt.Sprintf("echo '%s' >> /etc/hosts", hostsEntry)}); err != nil {
-			log.Warnf("Failed to add extra entry '%s' to /etc/hosts in node '%s'", hostsEntry, node.Name)
-		}
-	}
-
 	/*
 	 * Auxiliary Containers
 	 */
@@ -339,7 +327,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 		}
 		cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback
 		log.Infof("Creating LoadBalancer '%s'", lbNode.Name)
-		if err := NodeCreate(ctx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil {
+		if err := NodeCreate(clusterCreateCtx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil {
 			log.Errorln("Failed to create loadbalancer")
 			return err
 		}
@@ -348,7 +336,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 				// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
 				// ... by scanning for this line in logs and restarting the container in case it appears
 				log.Debugf("Starting to wait for loadbalancer node '%s'", lbNode.Name)
-				return NodeWaitForLogMessage(ctx, runtime, lbNode, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], time.Time{})
+				return NodeWaitForLogMessage(clusterCreateCtx, runtime, lbNode, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], time.Time{})
 			})
 		}
 	} else {
@@ -362,6 +350,34 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
 		return fmt.Errorf("Failed to bring up cluster")
 	}
 
+	/**********************************
+	 * Additional Cluster Preparation *
+	 **********************************/
+
+	/*
+	 * Networking Magic
+	 */
+
+	// add extra host
+	hostIP, err := GetHostIP(clusterPrepCtx, runtime, cluster)
+	if err != nil {
+		log.Errorln("Failed to get HostIP")
+		return err
+	}
+	hostsEntry := fmt.Sprintf("%s %s", hostIP, k3d.DefaultK3dInternalHostRecord)
+	log.Debugf("Adding extra host entry '%s'...", hostsEntry)
+	for _, node := range cluster.Nodes {
+		if err := runtime.ExecInNode(clusterPrepCtx, node, []string{"sh", "-c", fmt.Sprintf("echo '%s' >> /etc/hosts", hostsEntry)}); err != nil {
+			log.Warnf("Failed to add extra entry '%s' to /etc/hosts in node '%s'", hostsEntry, node.Name)
+		}
+	}
+
+	patchCmd := `test=$(kubectl get cm coredns -n kube-system --template='{{.data.NodeHosts}}' | sed -n -E -e '/[0-9\.]{4,12}\s+host\.k3d\.internal$/!p' -e '$a` + hostsEntry + `' | tr '\n' '^' | xargs -0 printf '{"data": {"NodeHosts":"%s"}}'| sed -E 's%\^%\\n%g') && kubectl patch cm coredns -n kube-system -p="$test"`
+	err = runtime.ExecInNode(clusterPrepCtx, cluster.Nodes[0], []string{"sh", "-c", patchCmd})
+	if err != nil {
+		log.Warnf("Failed to patch CoreDNS ConfigMap to include entry '%s': %+v", hostsEntry, err)
+	}
+
 	return nil
 }
 
diff --git a/pkg/runtimes/docker/network.go b/pkg/runtimes/docker/network.go
index 5343d4d9..1ab18da3 100644
--- a/pkg/runtimes/docker/network.go
+++ b/pkg/runtimes/docker/network.go
@@ -110,6 +110,7 @@ func GetNetwork(ctx context.Context, ID string) (types.NetworkResource, error) {
 func GetGatewayIP(ctx context.Context, network string) (net.IP, error) {
 	bridgeNetwork, err := GetNetwork(ctx, network)
 	if err != nil {
+		log.Errorf("Failed to get bridge network with name '%s'", network)
 		return nil, err
 	}
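
Note for reviewers, not part of the patch itself: the new patchCmd reads the NodeHosts block of the kube-system/coredns ConfigMap, filters out any stale host.k3d.internal record (the sed '!p' step), appends the fresh hostsEntry, and re-applies the result via kubectl patch, all exec'd inside the first cluster node. Below is a rough client-go sketch of the same ConfigMap update, for comparison only; the function name, kubeconfig handling, and the suffix-based dedup are assumptions of this sketch and are not part of the change.

// coredns_nodehosts_sketch.go — illustrative only; the patch above execs
// kubectl inside the first cluster node rather than using client-go.
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// addNodeHostsEntry rewrites the NodeHosts block of the kube-system/coredns
// ConfigMap: it drops any previous host.k3d.internal record and appends the
// given hostsEntry (e.g. "172.18.0.1 host.k3d.internal").
func addNodeHostsEntry(ctx context.Context, kubeconfig, hostsEntry string) error {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return err
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	cm, err := client.CoreV1().ConfigMaps("kube-system").Get(ctx, "coredns", metav1.GetOptions{})
	if err != nil {
		return err
	}
	// drop stale host.k3d.internal lines, approximating the sed '!p' filter
	var kept []string
	for _, line := range strings.Split(cm.Data["NodeHosts"], "\n") {
		if line != "" && !strings.HasSuffix(line, "host.k3d.internal") {
			kept = append(kept, line)
		}
	}
	kept = append(kept, hostsEntry)
	// equivalent of: kubectl patch cm coredns -n kube-system -p='{"data":{"NodeHosts":"..."}}'
	patch, err := json.Marshal(map[string]map[string]string{
		"data": {"NodeHosts": strings.Join(kept, "\n") + "\n"},
	})
	if err != nil {
		return err
	}
	_, err = client.CoreV1().ConfigMaps("kube-system").Patch(ctx, "coredns", types.StrategicMergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		return fmt.Errorf("failed to patch coredns ConfigMap: %w", err)
	}
	return nil
}

One likely reason the patch execs kubectl inside the node instead is that the server's kubeconfig and kubectl binary are already present there, so no API client setup is needed from the host, at the cost of the string munging in patchCmd.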