diff --git a/Makefile b/Makefile index 1f9427ceb..b77f631d6 100644 --- a/Makefile +++ b/Makefile @@ -100,6 +100,14 @@ publishdevoperator: ## Build and publish k8s-operator image to location specifie @test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-operator" && exit 1) TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=operator ./build_docker.sh +publishdevnameserver: ## Build and publish k8s-nameserver image to location specified by ${REPO} + @test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1) + @test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1) + @test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) + @test "${REPO}" != "tailscale/k8s-nameserver" || (echo "REPO=... must not be tailscale/k8s-nameserver" && exit 1) + @test "${REPO}" != "ghcr.io/tailscale/k8s-nameserver" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-nameserver" && exit 1) + TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-nameserver ./build_docker.sh + help: ## Show this help @echo "\nSpecify a command. 
The choices are:\n" @grep -hE '^[0-9a-zA-Z_-]+:.*?## .*$$' ${MAKEFILE_LIST} | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[0;36m%-20s\033[m %s\n", $$1, $$2}' diff --git a/build_docker.sh b/build_docker.sh index 30e00d37f..f2d1f9e46 100755 --- a/build_docker.sh +++ b/build_docker.sh @@ -70,6 +70,21 @@ case "$TARGET" in --target="${PLATFORM}" \ /usr/local/bin/operator ;; + k8s-nameserver) + DEFAULT_REPOS="tailscale/k8s-nameserver" + REPOS="${REPOS:-${DEFAULT_REPOS}}" + go run github.com/tailscale/mkctr \ + --gopaths="tailscale.com/cmd/k8s-nameserver:/usr/local/bin/k8s-nameserver" \ + --ldflags=" \ + -X tailscale.com/version.longStamp=${VERSION_LONG} \ + -X tailscale.com/version.shortStamp=${VERSION_SHORT} \ + -X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \ + --base="${BASE}" \ + --tags="${TAGS}" \ + --repos="${REPOS}" \ + --push="${PUSH}" \ + /usr/local/bin/k8s-nameserver + ;; *) echo "unknown target: $TARGET" exit 1 diff --git a/cmd/k8s-nameserver/main.go b/cmd/k8s-nameserver/main.go new file mode 100644 index 000000000..f6b275950 --- /dev/null +++ b/cmd/k8s-nameserver/main.go @@ -0,0 +1,240 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +// k8s-nameserver is a simple nameserver implementation meant to be used with +// k8s-operator to allow to resolve magicDNS names of Tailscale nodes in a +// Kubernetes cluster. 
+ +import ( + "context" + "encoding/json" + "fmt" + "log" + "net" + "net/netip" + "os" + "path/filepath" + + "github.com/fsnotify/fsnotify" + operatorutils "tailscale.com/k8s-operator" + "tailscale.com/net/dns/resolver" + "tailscale.com/net/tsdial" + "tailscale.com/types/logger" + "tailscale.com/util/dnsname" +) + +const ( + defaultDNSConfigDir = "/config" + defaultDNSFile = "dns.json" + udpEndpoint = ":1053" + + kubeletMountedConfigLn = "..data" +) + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.Printf + + res := resolver.New(logger, nil, nil, &tsdial.Dialer{Logf: logger}, nil) + + var configReader configReaderFunc = func() ([]byte, error) { + if contents, err := os.ReadFile(filepath.Join(defaultDNSConfigDir, defaultDNSFile)); err == nil { + return contents, nil + + } else if os.IsNotExist(err) { + return nil, nil + + } else { + return nil, err + } + } + + c := make(chan string) + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatalf("error creating a new configfile watcher: %v", err) + } + defer watcher.Close() + // kubelet mounts configmap to a Pod using a series of symlinks, one of + // which is /..data that Kubernetes recommends consumers to + // use if they need to monitor changes + // https://github.com/kubernetes/kubernetes/blob/v1.28.1/pkg/volume/util/atomic_writer.go#L39-L61 + // TODO (irbekrm): we need e2e tests to make sure that this keeps working for new kube versions etc + toWatch := filepath.Join(defaultDNSConfigDir, kubeletMountedConfigLn) + go func() { + logger("starting file watch for %s", defaultDNSConfigDir) + if err != nil { + log.Fatalf("error starting a new configfile watcher: %v", err) + } + for { + select { + case event, ok := <-watcher.Events: + if !ok { + logger("watcher finished") + cancel() + return + } + + if event.Name == toWatch { + msg := fmt.Sprintf("config update received: %s", event) + logger(msg) + c <- msg + } + + case err, ok := <-watcher.Errors: + if 
!ok { + logger("errors watcher finished: %v", err) + cancel() + return + } + if err != nil { + logger("error watching directory: %w", err) + cancel() + return + } + } + } + }() + if err = watcher.Add(defaultDNSConfigDir); err != nil { + log.Fatalf("failed setting up file watch for DNS config: %v", err) + } + + ns := &nameserver{ + configReader: configReader, + configWatcher: c, + logger: logger, + res: res, + } + + if err := ns.run(ctx, cancel); err != nil { + log.Fatalf("error running nameserver: %v", err) + } + + addr, err := net.ResolveUDPAddr("udp", udpEndpoint) + if err != nil { + log.Fatalf("error resolving UDP address: %v", err) + } + conn, err := net.ListenUDP("udp", addr) + if err != nil { + log.Fatalf("error opening udp connection: %v", err) + } + go func() { + <-ctx.Done() + conn.Close() + }() + + logger("k8s-nameserver listening on: %v", addr) + + for { + payloadBuff := make([]byte, 10000) + metadataBuff := make([]byte, 512) + _, _, _, addr, err := conn.ReadMsgUDP(payloadBuff, metadataBuff) + if err != nil { + logger(fmt.Sprintf("error reading UDP message: %v", err)) + continue + } + dnsAnswer, err := ns.query(ctx, payloadBuff, addr.AddrPort()) + if err != nil { + // reply with the dnsAnswer anyway- in some cases + // resolver might have written some useful data there + } + conn.WriteToUDP(dnsAnswer, addr) + } +} + +type nameserver struct { + configReader configReaderFunc + configWatcher <-chan string + res *resolver.Resolver + logger logger.Logf +} + +type configReaderFunc func() ([]byte, error) + +// run ensures that resolver configuration is up to date with regards to its +// source. will update config once before returning and keep monitoring it in a +// thread. 
+func (n *nameserver) run(ctx context.Context, cancelF context.CancelFunc) error { + go func() { + for { + select { + case <-ctx.Done(): + n.logger("nameserver exiting") + return + case <-n.configWatcher: + // TODO (irbekrm): this does not actually log anything + n.logger("attempting to update resolver config...") + if err := n.updateResolverConfig(); err != nil { + n.logger("error updating resolver config: %w", err) + cancelF() + } + // TODO (irbekrm): this does not actually log anything + n.logger("successfully updated resolver config") + } + } + }() + if err := n.updateResolverConfig(); err != nil { + return fmt.Errorf("error updating resolver config: %w", err) + } + n.logger("successfully updated resolver config") + return nil +} + +func (n *nameserver) query(ctx context.Context, payload []byte, add netip.AddrPort) ([]byte, error) { + return n.res.Query(ctx, payload, "udp", add) +} + +func (n *nameserver) updateResolverConfig() error { + dnsCfgBytes, err := n.configReader() + if err != nil { + n.logger("error reading config: %v", err) + return err + } + if dnsCfgBytes == nil || len(dnsCfgBytes) < 1 { + n.logger("no DNS config provided") + return nil + } + dnsCfg := &operatorutils.TSHosts{} + err = json.Unmarshal(dnsCfgBytes, dnsCfg) + if err != nil { + n.logger("error unmarshaling json: %v", err) + return err + } + if dnsCfg.Hosts == nil || len(dnsCfg.Hosts) < 1 { + n.logger("no host records found") + } + c := resolver.Config{} + + // Ensure that queries for ts.net subdomains are never forwarded to + // external resolvers + c.LocalDomains = []dnsname.FQDN{"ts.net", "ts.net."} + + c.Hosts = make(map[dnsname.FQDN][]netip.Addr) + for fqdn, ips := range dnsCfg.Hosts { + fqdn, err := dnsname.ToFQDN(fqdn) + if err != nil { + n.logger("invalid DNS config: cannot convert %s to FQDN: %v", fqdn, err) + return err + } + for _, ip := range ips { + ip, err := netip.ParseAddr(ip) + if err != nil { + n.logger("invalid DNS config: cannot convert %s to netip.Addr: %v", ip, err) 
+ return err + } + c.Hosts[fqdn] = []netip.Addr{ip} + } + } + // resolver will lock config so this is safe + n.res.SetConfig(c) + + // TODO (irbekrm): get a diff and log when/if resolver config is actually being changed + + return nil +} diff --git a/cmd/k8s-nameserver/main_test.go b/cmd/k8s-nameserver/main_test.go new file mode 100644 index 000000000..65f2ba143 --- /dev/null +++ b/cmd/k8s-nameserver/main_test.go @@ -0,0 +1,181 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "log" + "net/netip" + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "golang.org/x/net/dns/dnsmessage" + "tailscale.com/net/dns/resolver" + "tailscale.com/net/tsdial" +) + +func TestNameserver(t *testing.T) { + + // Setup + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hostConfig := `{"hosts":{"foo.bar.ts.net.": "10.20.30.40"}}` + + var mockConfigReader configReaderFunc = func() ([]byte, error) { + return []byte(hostConfig), nil + } + configWatcher := make(chan string) + logger := log.Printf + res := resolver.New(logger, nil, nil, &tsdial.Dialer{Logf: logger}, nil) + + ns := &nameserver{ + configReader: mockConfigReader, + configWatcher: configWatcher, + logger: logger, + res: *res, + } + assert.NoError(t, ns.run(ctx, cancel), "error running nameserver") + + // Test that nameserver can resolve a DNS name from provided hosts config + + wantedResponse := dnsmessage.Message{ + Header: dnsmessage.Header{ + ID: 0x0, + Response: true, + OpCode: 0, + Authoritative: true, + Truncated: false, + RecursionDesired: false, + RecursionAvailable: false, + AuthenticData: false, + CheckingDisabled: false, + RCode: dnsmessage.RCodeSuccess, + }, + + Answers: []dnsmessage.Resource{{ + Header: dnsmessage.ResourceHeader{ + Name: dnsmessage.MustNewName("foo.bar.ts.net."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + TTL: 0x258, + Length: 0x4, + 
}, + Body: &dnsmessage.AResource{ + A: [4]byte{10, 20, 30, 40}, + }, + }}, + Questions: []dnsmessage.Question{ + { + Name: dnsmessage.MustNewName("foo.bar.ts.net."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + }, + Additionals: []dnsmessage.Resource{}, + Authorities: []dnsmessage.Resource{}, + } + testQuery := dnsmessage.Message{ + Header: dnsmessage.Header{Authoritative: true}, + Questions: []dnsmessage.Question{ + { + Name: dnsmessage.MustNewName("foo.bar.ts.net."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + }, + } + testAddr, err := netip.ParseAddrPort("10.40.30.20:0") + assert.NoError(t, err, "error parsing IP address") + packedTestQuery, err := testQuery.Pack() + assert.NoError(t, err, "error parsing DNS query") + answer, err := ns.query(ctx, packedTestQuery, testAddr) + assert.NoError(t, err, "error querying nameserver") + var gotResponse dnsmessage.Message + assert.NoError(t, gotResponse.Unpack(answer), "error unpacking DNS answer") + assert.Equal(t, gotResponse, wantedResponse) + + // Test that nameserver's hosts config gets dynamically updated + + newHostConfig := `{"hosts": {"baz.bar.ts.net.": "10.40.30.20"}}` + var newMockConfigReader configReaderFunc = func() ([]byte, error) { + return []byte(newHostConfig), nil + } + ns.configReader = newMockConfigReader + + timeout := 3 * time.Second + timer := time.NewTimer(timeout) + select { + case <-timer.C: + t.Fatalf("nameserver failed to process config update within %v", timeout) + case configWatcher <- "config update": + } + wantedResponse = dnsmessage.Message{ + Header: dnsmessage.Header{ + ID: 0x0, + Response: true, + OpCode: 0, + Authoritative: true, + Truncated: false, + RecursionDesired: false, + RecursionAvailable: false, + AuthenticData: false, + CheckingDisabled: false, + RCode: dnsmessage.RCodeSuccess, + }, + + Answers: []dnsmessage.Resource{{ + Header: dnsmessage.ResourceHeader{ + Name: dnsmessage.MustNewName("baz.bar.ts.net."), + Type: dnsmessage.TypeA, + 
Class: dnsmessage.ClassINET, + TTL: 0x258, + Length: 0x4, + }, + Body: &dnsmessage.AResource{ + A: [4]byte{10, 40, 30, 20}, + }, + }}, + Questions: []dnsmessage.Question{ + { + Name: dnsmessage.MustNewName("baz.bar.ts.net."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + }, + Additionals: []dnsmessage.Resource{}, + Authorities: []dnsmessage.Resource{}, + } + testQuery = dnsmessage.Message{ + Header: dnsmessage.Header{Authoritative: true}, + Questions: []dnsmessage.Question{ + { + Name: dnsmessage.MustNewName("baz.bar.ts.net."), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + }, + } + packedTestQuery, err = testQuery.Pack() + assert.NoError(t, err, "error parsing DNS query") + + // retry a couple times as the nameserver will have eventually processed + // the update + assert.Eventually(t, func() bool { + answer, err = ns.query(ctx, packedTestQuery, testAddr) + assert.NoError(t, err, "error querying nameserver") + gotResponse = dnsmessage.Message{} + + assert.NoError(t, gotResponse.Unpack(answer), "error unpacking DNS answer") + if reflect.DeepEqual(wantedResponse, gotResponse) { + return true + } + return false + }, time.Second*5, time.Second) +} diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml index 8ea07e808..5d52ebfda 100644 --- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml @@ -24,6 +24,9 @@ rules: - apiGroups: ["tailscale.com"] resources: ["connectors", "connectors/status"] verbs: ["get", "list", "watch", "update"] +- apiGroups: ["tailscale.com"] + resources: ["dnsconfigs", "dnsconfigs/status"] + verbs: ["get", "list", "watch", "update"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -45,10 +48,16 @@ metadata: namespace: {{ .Release.Namespace }} rules: - apiGroups: [""] - resources: ["secrets"] + resources: ["secrets", "serviceaccounts", 
"configmaps"] verbs: ["*"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] - apiGroups: ["apps"] - resources: ["statefulsets"] + resources: ["statefulsets", "deployments"] + verbs: ["*"] +- apiGroups: ["discovery.k8s.io"] + resources: ["endpointslices"] verbs: ["*"] --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml new file mode 100644 index 000000000..dad4b1b9e --- /dev/null +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml @@ -0,0 +1,97 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: dnsconfigs.tailscale.com +spec: + group: tailscale.com + names: + kind: DNSConfig + listKind: DNSConfigList + plural: dnsconfigs + shortNames: + - dc + singular: dnsconfig + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Status of the deployed Connector resources. + jsonPath: .status.nameserverStatus.ip + name: NameserverIP + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + required: + - nameserver + properties: + nameserver: + type: object + properties: + image: + type: object + properties: + repo: + type: string + tag: + type: string + status: + type: object + properties: + conditions: + description: 'TODO: rename ConnectorCondition to sth like ComponentCondition' + type: array + items: + description: ConnectorCondition contains condition information for a Connector. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Connector. + type: integer + format: int64 + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', 'Unknown'). + type: string + type: + description: Type of the condition, known values are (`SubnetRouterReady`). 
+ type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + nameserverStatus: + type: object + properties: + ip: + type: string + served: true + storage: true + subresources: + status: {} diff --git a/cmd/k8s-operator/deploy/examples/dnsconfig.yaml b/cmd/k8s-operator/deploy/examples/dnsconfig.yaml new file mode 100644 index 000000000..a12f18d75 --- /dev/null +++ b/cmd/k8s-operator/deploy/examples/dnsconfig.yaml @@ -0,0 +1,9 @@ +apiVersion: tailscale.com/v1alpha1 +kind: DNSConfig +metadata: + name: ts-dns +spec: + nameserver: + image: + repo: gcr.io/csi-test-290908/nameserver + tag: v0.0.18dns \ No newline at end of file diff --git a/cmd/k8s-operator/deploy/manifests/nameserver/cm.yaml b/cmd/k8s-operator/deploy/manifests/nameserver/cm.yaml new file mode 100644 index 000000000..0eb812270 --- /dev/null +++ b/cmd/k8s-operator/deploy/manifests/nameserver/cm.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: dnsconfig + labels: + app.kubernetes.io/name: tailscale + app.kubernetes.io/component: nameserver \ No newline at end of file diff --git a/cmd/k8s-operator/deploy/manifests/nameserver/deploy.yaml b/cmd/k8s-operator/deploy/manifests/nameserver/deploy.yaml new file mode 100644 index 000000000..1d069f58d --- /dev/null +++ b/cmd/k8s-operator/deploy/manifests/nameserver/deploy.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nameserver +spec: + replicas: 1 + revisionHistoryLimit: 5 + selector: + matchLabels: + app: nameserver + strategy: + type: Recreate + template: + metadata: + labels: + app: nameserver + spec: + containers: + - imagePullPolicy: IfNotPresent + name: nameserver + ports: + - name: udp + protocol: UDP + containerPort: 1053 + volumeMounts: + - name: dnsconfig + mountPath: /config + restartPolicy: Always + serviceAccount: nameserver + serviceAccountName: nameserver + volumes: + - name: dnsconfig + configMap: + name: dnsconfig \ No newline at end of file diff --git 
a/cmd/k8s-operator/deploy/manifests/nameserver/sa.yaml b/cmd/k8s-operator/deploy/manifests/nameserver/sa.yaml new file mode 100644 index 000000000..ed8a06eca --- /dev/null +++ b/cmd/k8s-operator/deploy/manifests/nameserver/sa.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nameserver \ No newline at end of file diff --git a/cmd/k8s-operator/deploy/manifests/nameserver/svc.yaml b/cmd/k8s-operator/deploy/manifests/nameserver/svc.yaml new file mode 100644 index 000000000..101a6f705 --- /dev/null +++ b/cmd/k8s-operator/deploy/manifests/nameserver/svc.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: nameserver +spec: + selector: + app: nameserver + ports: + - name: udp + targetPort: 1053 + port: 53 + protocol: UDP \ No newline at end of file diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 057117b0e..fb864ad4e 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -191,6 +191,16 @@ rules: - list - watch - update + - apiGroups: + - tailscale.com + resources: + - dnsconfigs + - dnsconfigs/status + verbs: + - get + - list + - watch + - update --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -215,12 +225,29 @@ rules: - "" resources: - secrets + - serviceaccounts + - configmaps verbs: - '*' + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch - apiGroups: - apps resources: - statefulsets + - deployments + verbs: + - '*' + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices verbs: - '*' --- diff --git a/cmd/k8s-operator/dnsrecords.go b/cmd/k8s-operator/dnsrecords.go new file mode 100644 index 000000000..f9924f7bc --- /dev/null +++ b/cmd/k8s-operator/dnsrecords.go @@ -0,0 +1,324 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// tailscale-operator provides a way to 
expose services running in a Kubernetes +// cluster to your Tailnet and to make Tailscale nodes available to cluster +// workloads +package main + +import ( + "context" + "encoding/json" + "fmt" + "slices" + "strings" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + networkingv1 "k8s.io/api/networking/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale/apitype" + kube "tailscale.com/k8s-operator" + operatorutils "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/util/mak" +) + +const ( + dnsConfigKey = "dns.json" + configMapName = "dnsconfig" + + dnsRecordsRecocilerFinalizer = "tailscale.com/dns-records-reconciler" + annotationTSMagicDNSName = "tailscale.com/magic-dns" +) + +// dnsRecordsReconciler knows how to update ts.net nameserver with records +// of a tailnet MagicDNS name to kube Service endpoints. +type dnsRecordsReconciler struct { + client.Client + // namespace in which tailscale resources get provisioned + tsNamespace string + // localClient knows how to talk to tailscaled local API + localAPIClient localClient + logger *zap.SugaredLogger + isDefaultLoadBalancer bool +} + +type localClient interface { + WhoIs(ctx context.Context, remoteAddr string) (*apitype.WhoIsResponse, error) +} + +func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { + logger := dnsRR.logger.With("EndpointSlice", req.Name) + logger.Debugf("starting reconcile") + defer logger.Debugf("reconcile finished") + + // Check that this is an EndpointSlice is for a headless Service for a + // tailscale proxy type that we support creating DNS records for. 
+ // Currently this is cluster egress or L7 cluster ingress. + eps := new(discoveryv1.EndpointSlice) + err = dnsRR.Get(ctx, req.NamespacedName, eps) + if apierrors.IsNotFound(err) { + logger.Debugf("EndpointSlice not found") + return reconcile.Result{}, nil + } + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to get EndpointSlice: %w", err) + } + if !eps.DeletionTimestamp.IsZero() { + logger.Debug("EndpointSlice is being deleted, clean up resources") + return reconcile.Result{}, dnsRR.maybeCleanup(ctx, eps, logger) + } + + maybeHeadlessSvcName, ok := eps.Labels[discoveryv1.LabelServiceName] + if !ok { + logger.Debugf("EndpointSlice does not have %s label, do nothing", discoveryv1.LabelServiceName) + return reconcile.Result{}, nil + } + maybyHeadlessSvc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: maybeHeadlessSvcName, Namespace: dnsRR.tsNamespace}} + if err = dnsRR.Get(ctx, client.ObjectKeyFromObject(maybyHeadlessSvc), maybyHeadlessSvc); err != nil { + return reconcile.Result{}, fmt.Errorf("error retrieving Service for EndpointSlice: %w", err) + } + ok, err = dnsRR.isHeadlessSvcForSupportedProxy(ctx, maybyHeadlessSvc) + if err != nil { + return reconcile.Result{}, fmt.Errorf("error validating proxy for DNS records: %w", err) + } + if !ok { + logger.Debugf("EndpointSlice is not for a proxy type that we create DNS records for, do nothing") + return reconcile.Result{}, nil + } + + dnsCfgLst := new(tsapi.DNSConfigList) + if err = dnsRR.List(ctx, dnsCfgLst); err != nil { + return reconcile.Result{}, fmt.Errorf("error listing DNSConfigs: %w", err) + } + + if len(dnsCfgLst.Items) == 0 { + logger.Debugf("DNSConfig does not exist, not creating DNS records") + return reconcile.Result{}, nil + } + + if len(dnsCfgLst.Items) > 1 { + logger.Errorf("Invalid cluster state - more than one DNSConfig found in cluster. 
Please ensure no more than one exists") + return reconcile.Result{}, nil + } + + dnsCfg := dnsCfgLst.Items[0] + + if !kube.DNSCfgIsReady(&dnsCfg) { + logger.Info("DNSConfig is not ready yet, waiting...") + return reconcile.Result{}, nil + } + return reconcile.Result{}, dnsRR.maybeProvision(ctx, eps, logger) +} + +func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, eps *discoveryv1.EndpointSlice, logger *zap.SugaredLogger) error { + logger.Debugf("provisioning record") + if eps == nil { + return nil + } + fqdn, err := dnsRR.fqdnForDNSRecord(ctx, eps, logger) + if err != nil { + return fmt.Errorf("error determining DNS name for record: %w", err) + } + if fqdn == "" { + logger.Debugf("MagicDNS name does not (yet) exist, not provisioning DNS record") + return nil // a new reconcile will be triggered once it's added + } + oldEps := eps.DeepCopy() + if !slices.Contains(eps.Finalizers, dnsRecordsRecocilerFinalizer) { + eps.Finalizers = append(eps.Finalizers, dnsRecordsRecocilerFinalizer) + } + if _, ok := eps.Annotations[annotationTSMagicDNSName]; !ok { + mak.Set(&eps.Annotations, annotationTSMagicDNSName, fqdn) // label eps with the assocated MagicDNS name to make record cleanup easier + } + if !apiequality.Semantic.DeepEqual(oldEps, eps) { + logger.Infof("provisioning DNS record for MagicDNS name: %s", fqdn) // this will be printed exactly once + if err := dnsRR.Update(ctx, eps); err != nil { + return fmt.Errorf("error updating EndpointSlice metadata: %w", err) + } + } + + ips := make([]string, 0) + for _, ep := range eps.Endpoints { + ips = append(ips, ep.Addresses...) 
+ } + if len(ips) == 0 { + logger.Debugf("No endpoint addresses found") + return nil // a new reconcile will be triggered once the EndpointSlice is updated with addresses + } + updateFunc := func(cfg *operatorutils.TSHosts) { + mak.Set(&cfg.Hosts, fqdn, ips) + } + if err = dnsRR.updateDNSConfig(ctx, updateFunc); err != nil { + return fmt.Errorf("error updating DNS records: %w", err) + } + return nil +} + +func (h *dnsRecordsReconciler) maybeCleanup(ctx context.Context, eps *discoveryv1.EndpointSlice, logger *zap.SugaredLogger) error { + ix := slices.Index(eps.Finalizers, dnsRecordsRecocilerFinalizer) + if ix == -1 { + logger.Debugf("no finalizer, nothing to do") + return nil + } + cm := &corev1.ConfigMap{} + err := h.Client.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: h.tsNamespace}, cm) + if apierrors.IsNotFound(err) { // If the ConfigMap with the DNS config does not exist, just remove the finalizer + logger.Debug("CM not found") + return h.removeEPSFinalizer(ctx, eps) + } + if err != nil { + return fmt.Errorf("error retrieving ConfigMap: %w", err) + } + _, ok := cm.Data[dnsConfigKey] + if !ok { + logger.Debug("config key not found") + return h.removeEPSFinalizer(ctx, eps) + } + fqdn, ok := eps.GetAnnotations()[annotationTSMagicDNSName] + if !ok || fqdn == "" { + return h.removeEPSFinalizer(ctx, eps) + } + logger.Infof("removing DNS record for MagicDNS name %s", fqdn) + updateFunc := func(cfg *operatorutils.TSHosts) { + delete(cfg.Hosts, fqdn) + } + if err = h.updateDNSConfig(ctx, updateFunc); err != nil { + return fmt.Errorf("error updating DNS config: %w", err) + } + return h.removeEPSFinalizer(ctx, eps) +} + +func (dnsRR *dnsRecordsReconciler) isHeadlessSvcForSupportedProxy(ctx context.Context, svc *corev1.Service) (bool, error) { + if isManagedByType(svc, "ingress") { + return true, nil + } + if !isManagedByType(svc, "svc") { + return false, nil + } + parentNSName := parentFromObjectLabels(svc) + parentSvc := new(corev1.Service) + if err := 
dnsRR.Get(ctx, parentNSName, parentSvc); err != nil { + return false, fmt.Errorf("error retrieving parent Service: %w", err) + } + if ip := tailnetTargetAnnotation(parentSvc); ip != "" { + return true, nil // egress Service + } + if _, ok := parentSvc.GetAnnotations()[AnnotationTailnetTargetFQDN]; ok { + return true, nil // egress Service + } + return false, nil // ingress Service +} + +func (dnsRR *dnsRecordsReconciler) removeEPSFinalizer(ctx context.Context, eps *discoveryv1.EndpointSlice) error { + idx := slices.Index(eps.Finalizers, dnsRecordsRecocilerFinalizer) + if idx == -1 { + return nil + } + eps.Finalizers = append(eps.Finalizers[:idx], eps.Finalizers[idx+1:]...) + return dnsRR.Update(ctx, eps) +} + +func (dnsRR *dnsRecordsReconciler) fqdnForDNSRecord(ctx context.Context, eps *discoveryv1.EndpointSlice, logger *zap.SugaredLogger) (string, error) { + svcName, ok := eps.Labels[discoveryv1.LabelServiceName] // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership + if !ok { + logger.Debugf("EndpointSlice is not managed by a Service") + return "", nil + } + maybeHeadlessSvc := new(corev1.Service) + if err := dnsRR.Get(ctx, types.NamespacedName{Namespace: dnsRR.tsNamespace, Name: svcName}, maybeHeadlessSvc); err != nil { + return "", fmt.Errorf("error retrieving owning Service for EndpointSlice: %w", err) + } + parentName := parentFromObjectLabels(maybeHeadlessSvc) + if isManagedByType(maybeHeadlessSvc, "ingress") { + ing := new(networkingv1.Ingress) + if err := dnsRR.Get(ctx, parentName, ing); err != nil { + return "", err + } + if len(ing.Status.LoadBalancer.Ingress) == 0 { + return "", nil + } + return ing.Status.LoadBalancer.Ingress[0].Hostname, nil + } + if isManagedByType(maybeHeadlessSvc, "svc") { + svc := new(corev1.Service) + if err := dnsRR.Get(ctx, parentName, svc); err != nil { + return "", err + } + return dnsRR.fqdnForDNSRecordFromService(ctx, svc) + } + return "", nil +} + +func (h *dnsRecordsReconciler) 
updateDNSConfig(ctx context.Context, update func(*operatorutils.TSHosts)) error { + cm := &corev1.ConfigMap{} + if err := h.Client.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: h.tsNamespace}, cm); err != nil { + return fmt.Errorf("error retrieving nameserver config: %w", err) + } + dnsCfg := operatorutils.TSHosts{Hosts: make(map[string][]string)} + if cm.Data != nil && cm.Data[dnsConfigKey] != "" { + if err := json.Unmarshal([]byte(cm.Data[dnsConfigKey]), &dnsCfg); err != nil { + return err + } + } + update(&dnsCfg) + configBytes, err := json.Marshal(dnsCfg) + if err != nil { + return fmt.Errorf("error marshalling DNS config: %w", err) + } + mak.Set(&cm.Data, dnsConfigKey, string(configBytes)) + return h.Update(ctx, cm) +} + +func (dnsRR *dnsRecordsReconciler) fqdnForDNSRecordFromService(ctx context.Context, svc *corev1.Service) (string, error) { + if tailnetIP := tailnetTargetAnnotation(svc); tailnetIP != "" { + return dnsRR.tailnetFQDNForIP(ctx, tailnetIP) + } + if tailnetFQDN := svc.Annotations[AnnotationTailnetTargetFQDN]; tailnetFQDN != "" { + return tailnetFQDN, nil + } + if hasLoadBalancerClass(svc, dnsRR.isDefaultLoadBalancer) { + if len(svc.Status.LoadBalancer.Ingress) > 0 { + return svc.Status.LoadBalancer.Ingress[0].Hostname, nil + } + return "", nil + } + if hasExposeAnnotation(svc) { + return dnsRR.fqdnFromSecretData(ctx, svc) + } + return "", nil +} + +func (h *dnsRecordsReconciler) tailnetFQDNForIP(ctx context.Context, ip string) (string, error) { + whois, err := h.localAPIClient.WhoIs(ctx, ip) + if err != nil { + h.logger.Errorf("error determining Tailscale node: %v", err) + return "", err + } + fqdn := whois.Node.Name + fqdn = strings.TrimSuffix(fqdn, ".") + return fqdn, nil +} + +func (h *dnsRecordsReconciler) fqdnFromSecretData(ctx context.Context, svc *corev1.Service) (string, error) { + childResourceLabels := map[string]string{ + LabelManaged: "true", + LabelParentName: svc.Name, + LabelParentNamespace: svc.Namespace, + 
LabelParentType: "svc", + } + secret, err := getSingleObject[corev1.Secret](ctx, h.Client, h.tsNamespace, childResourceLabels) + if err != nil { + return "", err + } + return string(secret.Data["device_fqdn"]), nil +} diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index af09be075..3f91d50d7 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -68,7 +68,7 @@ func (a *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request } else if err != nil { return reconcile.Result{}, fmt.Errorf("failed to get ing: %w", err) } - if !ing.DeletionTimestamp.IsZero() || !a.shouldExpose(ing) { + if !ing.DeletionTimestamp.IsZero() || !isTailscaleIngress(ing) { logger.Debugf("ingress is being deleted or should not be exposed, cleaning up") return reconcile.Result{}, a.maybeCleanup(ctx, logger, ing) } @@ -291,7 +291,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga return nil } -func (a *IngressReconciler) shouldExpose(ing *networkingv1.Ingress) bool { +func isTailscaleIngress(ing *networkingv1.Ingress) bool { return ing != nil && ing.Spec.IngressClassName != nil && *ing.Spec.IngressClassName == tailscaleIngressClassName diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go new file mode 100644 index 000000000..985bb81fc --- /dev/null +++ b/cmd/k8s-operator/nameserver.go @@ -0,0 +1,293 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// tailscale-operator provides a way to expose services running in a Kubernetes +// cluster to your Tailnet and to make Tailscale nodes available to cluster +// workloads +package main + +import ( + "context" + "encoding/json" + "fmt" + "slices" + "sync" + + _ "embed" + + "github.com/pkg/errors" + "go.uber.org/zap" + xslices "golang.org/x/exp/slices" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + apierrors 
"k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/tstime" + "tailscale.com/types/ptr" + "tailscale.com/util/clientmetric" + "tailscale.com/util/set" +) + +type deployable struct { + yaml []byte + obj client.Object + objTemplate func() client.Object + updateObj func(client.Object, deployCfg) (client.Object, error) + getPatch func(client.Object, deployCfg) (client.Patch, error) +} + +var ( + //go:embed deploy/manifests/nameserver/cm.yaml + cmYaml []byte + //go:embed deploy/manifests/nameserver/deploy.yaml + deployYaml []byte + //go:embed deploy/manifests/nameserver/sa.yaml + saYaml []byte + //go:embed deploy/manifests/nameserver/svc.yaml + svcYaml []byte + + cmDeployable = deployable{ + yaml: cmYaml, + objTemplate: func() client.Object { + return &corev1.ConfigMap{TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"}} + }, + obj: &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"}, + }, + getPatch: func(obj client.Object, _ deployCfg) (client.Patch, error) { return client.MergeFrom(obj), nil }, + updateObj: func(obj client.Object, _ deployCfg) (client.Object, error) { return obj, nil }, + } + deployDeployable = deployable{ + yaml: deployYaml, + obj: &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: appsv1.SchemeGroupVersion.Identifier()}, + }, + objTemplate: func() client.Object { + return &appsv1.Deployment{TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: appsv1.SchemeGroupVersion.Identifier()}} + }, + getPatch: func(o client.Object, cfg deployCfg) (client.Patch, error) { + deploy, ok := o.(*appsv1.Deployment) + if !ok { + return nil, errors.New("failed to convert 
obj to Deployment") + } + deploy.Spec.Template.Spec.Containers[0].Image = fmt.Sprintf("%s:%s", cfg.imageRepo, cfg.imageTag) + return client.MergeFrom(deploy), nil + }, + updateObj: func(obj client.Object, cfg deployCfg) (client.Object, error) { + deploy, ok := obj.(*appsv1.Deployment) + if !ok { + return nil, errors.New("failed to convert obj to Deployment") + } + deploy.Spec.Template.Spec.Containers[0].Image = fmt.Sprintf("%s:%s", cfg.imageRepo, cfg.imageTag) + return deploy, nil + }, + } + saDeployable = deployable{ + yaml: saYaml, + obj: &corev1.ServiceAccount{TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}}, + getPatch: func(obj client.Object, _ deployCfg) (client.Patch, error) { return client.MergeFrom(obj), nil }, + updateObj: func(obj client.Object, _ deployCfg) (client.Object, error) { return obj, nil }, + objTemplate: func() client.Object { + return &corev1.ServiceAccount{TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}} + }, + } + svcDeployable = deployable{ + yaml: svcYaml, + obj: &corev1.Service{TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}}, + getPatch: func(obj client.Object, _ deployCfg) (client.Patch, error) { return client.MergeFrom(obj), nil }, + updateObj: func(obj client.Object, _ deployCfg) (client.Object, error) { return obj, nil }, + objTemplate: func() client.Object { + return &corev1.Service{TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}} + }, + } +) + +type patch struct { + data []byte +} + +func (p patch) Data(client.Object) []byte { + return p.data +} +func (p patch) Type() types.PatchType { + return types.ApplyPatchType +} + +const ( + reasonNameserverCreationFailed = "NameserverCreationFailed" + reasonMultipleDNSConfigsPresent = "MultipleDNSConfigsPresent" + + reasonNameserverCreated = "NameserverCreated" + + messageNameserverCreationFailed = "Failed creating nameserver resources: %v" + messageMultipleDNSConfigsPresent = "Multiple DNSConfig resources found in cluster. 
Please ensure no more than one is present." +) + +type NameserverReconciler struct { + client.Client + logger *zap.SugaredLogger + recorder record.EventRecorder + clock tstime.Clock + tsNamespace string + + mu sync.Mutex // protects following + managedNameservers set.Slice[types.UID] // one or none +} + +var ( + gaugeNameserverResources = clientmetric.NewGauge("k8s_nameserver_resources") +) + +func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { + logger := a.logger.With("dnsConfig", req.Name) + logger.Debugf("starting reconcile") + defer logger.Debugf("reconcile finished") + + // get the dnsconfig in question + var dnsCfg tsapi.DNSConfig + err = a.Get(ctx, req.NamespacedName, &dnsCfg) + if apierrors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + logger.Debugf("dnsconfig not found, assuming it was deleted") + return reconcile.Result{}, nil + } else if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to get dnsconfig: %w", err) + } + if !dnsCfg.DeletionTimestamp.IsZero() { + logger.Debugf("DNSConfig is being deleted, cleaning up resources") + ix := xslices.Index(dnsCfg.Finalizers, FinalizerName) + if ix < 0 { + logger.Debugf("no finalizer, nothing to do") + return reconcile.Result{}, nil + } + if err := a.maybeCleanup(ctx, &dnsCfg, logger); err != nil { + logger.Errorf("error cleaning up reconciler resource: %v", err) + return res, err + } + dnsCfg.Finalizers = append(dnsCfg.Finalizers[:ix], dnsCfg.Finalizers[ix+1:]...) 
+ if err := a.Update(ctx, &dnsCfg); err != nil { + logger.Errorf("error removing finalizer: %v", err) + return reconcile.Result{}, err + } + logger.Infof("Nameserver resources cleaned up") + return reconcile.Result{}, nil + } + + oldCnStatus := dnsCfg.Status.DeepCopy() + setStatus := func(dnsCfg *tsapi.DNSConfig, conditionType tsapi.ConnectorConditionType, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { + tsoperator.SetDNSConfigCondition(dnsCfg, tsapi.NameserverReady, status, reason, message, dnsCfg.Generation, a.clock, logger) + if !apiequality.Semantic.DeepEqual(oldCnStatus, dnsCfg.Status) { + // An error encountered here should get returned by the Reconcile function. + if updateErr := a.Client.Status().Update(ctx, dnsCfg); updateErr != nil { + err = errors.Wrap(err, updateErr.Error()) + } + } + return res, err + } + var dnsCfgs tsapi.DNSConfigList + if err := a.List(ctx, &dnsCfgs); err != nil { + return res, fmt.Errorf("error listing DNSConfigs: %w", err) + } + if len(dnsCfgs.Items) > 1 { + msg := "invalid cluster configuration: more than one tailscale.com/dnsconfigs found. Please ensure that no more than one is created." 
+ logger.Error(msg) + a.recorder.Event(&dnsCfg, corev1.EventTypeWarning, reasonMultipleDNSConfigsPresent, messageMultipleDNSConfigsPresent) + return setStatus(&dnsCfg, tsapi.NameserverReady, metav1.ConditionFalse, reasonMultipleDNSConfigsPresent, messageMultipleDNSConfigsPresent) + } + + if !slices.Contains(dnsCfg.Finalizers, FinalizerName) { + logger.Infof("ensuring nameserver resources") + dnsCfg.Finalizers = append(dnsCfg.Finalizers, FinalizerName) + if err := a.Update(ctx, &dnsCfg); err != nil { + msg := fmt.Sprintf(messageNameserverCreationFailed, err) + logger.Error(msg) + return setStatus(&dnsCfg, tsapi.NameserverReady, metav1.ConditionFalse, reasonNameserverCreationFailed, msg) + } + } + if err := a.maybeProvision(ctx, &dnsCfg, logger); err != nil { + return reconcile.Result{}, fmt.Errorf("error provisioning nameserver resources: %w", err) + } + + a.mu.Lock() + a.managedNameservers.Add(dnsCfg.UID) + a.mu.Unlock() + gaugeNameserverResources.Set(int64(a.managedNameservers.Len())) + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "nameserver", Namespace: a.tsNamespace}, + } + if err := a.Client.Get(ctx, client.ObjectKeyFromObject(svc), svc); err != nil { + return res, fmt.Errorf("error getting Service: %w", err) + } + if ip := svc.Spec.ClusterIP; ip != "" && ip != "None" { + dnsCfg.Status.NameserverStatus = &tsapi.NameserverStatus{ + IP: ip, + } + return setStatus(&dnsCfg, tsapi.NameserverReady, metav1.ConditionTrue, reasonNameserverCreated, reasonNameserverCreated) + } + logger.Info("nameserver Service does not yet have an IP address, waiting..") + return reconcile.Result{Requeue: true}, nil +} + +type deployCfg struct { + imageRepo string + imageTag string +} + +func (a *NameserverReconciler) maybeProvision(ctx context.Context, dnsCfg *tsapi.DNSConfig, logger *zap.SugaredLogger) error { + crl := childResourceLabels(dnsCfg.Name, a.tsNamespace, "nameserver") + cfg := deployCfg{ + imageRepo: "tailscale/k8s-nameserver", + imageTag: "unstable", + } + if 
dnsCfg.Spec.Nameserver.Image.Repo != "" { + cfg.imageRepo = dnsCfg.Spec.Nameserver.Image.Repo + } + if dnsCfg.Spec.Nameserver.Image.Tag != "" { + cfg.imageTag = dnsCfg.Spec.Nameserver.Image.Tag + } + for _, deployable := range []deployable{cmDeployable, saDeployable, svcDeployable, deployDeployable} { + obj := deployable.objTemplate() + if err := yaml.Unmarshal(deployable.yaml, obj); err != nil { + return fmt.Errorf("error unmarshalling yaml: %w", err) + } + obj.SetLabels(crl) + obj.SetNamespace(a.tsNamespace) + obj.SetOwnerReferences([]metav1.OwnerReference{*metav1.NewControllerRef(dnsCfg, tsapi.SchemeGroupVersion.WithKind("DNSConfig"))}) + obj, err := deployable.updateObj(obj, cfg) + if err != nil { + return fmt.Errorf("error updating object of kind: %s", obj.GetObjectKind().GroupVersionKind().Kind) + } + bs, err := json.Marshal(obj) + if err != nil { + return fmt.Errorf("error marshaling object: %s", obj.GetObjectKind().GroupVersionKind().Kind) + } + patch := client.RawPatch(types.ApplyPatchType, bs) + logger.Infof("about to apply patch for group: %s, kind: %s, version: %s", obj.GetObjectKind().GroupVersionKind().Group, obj.DeepCopyObject().GetObjectKind().GroupVersionKind().Kind, obj.GetObjectKind().GroupVersionKind().Version) + if err := a.Client.Patch(ctx, obj, patch, &client.PatchOptions{ + Force: ptr.To(true), + FieldManager: "nameserver-reconciler", + }); err != nil { + return fmt.Errorf("error patching resource: %w", err) + } + } + return nil +} + +func (a *NameserverReconciler) maybeCleanup(ctx context.Context, dnsCfg *tsapi.DNSConfig, logger *zap.SugaredLogger) error { + a.mu.Lock() + a.managedNameservers.Remove(dnsCfg.UID) + a.mu.Unlock() + gaugeNameserverResources.Set(int64(a.managedNameservers.Len())) + return nil +} diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 483a88bba..66c3db529 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -10,7 +10,6 @@ package main import ( "context" "os" - 
"regexp" "strings" "time" @@ -20,6 +19,7 @@ import ( "golang.org/x/oauth2/clientcredentials" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" networkingv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" @@ -220,8 +220,12 @@ func runReconcilers(zlog *zap.SugaredLogger, s *tsnet.Server, tsNamespace string // resources that we GET via the controller manager's client. Cache: cache.Options{ ByObject: map[client.Object]cache.ByObject{ - &corev1.Secret{}: nsFilter, - &appsv1.StatefulSet{}: nsFilter, + &corev1.Secret{}: nsFilter, + &corev1.ServiceAccount{}: nsFilter, + &corev1.ConfigMap{}: nsFilter, + &appsv1.StatefulSet{}: nsFilter, + &appsv1.Deployment{}: nsFilter, + &discoveryv1.EndpointSlice{}: nsFilter, }, }, Scheme: tsapi.GlobalScheme, @@ -291,7 +295,63 @@ func runReconcilers(zlog *zap.SugaredLogger, s *tsnet.Server, tsNamespace string clock: tstime.DefaultClock{}, }) if err != nil { - startlog.Fatal("could not create connector reconciler: %v", err) + startlog.Fatalf("could not create connector reconciler: %v", err) + } + // TODO (irbekrm): switch to metadata-only watches for resources whose + // spec we don't need to inspect to reduce memory consumption + // https://github.com/kubernetes-sigs/controller-runtime/issues/1159 + nameserverFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("nameserver")) + err = builder.ControllerManagedBy(mgr). + For(&tsapi.DNSConfig{}). + Watches(&appsv1.Deployment{}, nameserverFilter). + Watches(&corev1.ConfigMap{}, nameserverFilter). + Watches(&corev1.Service{}, nameserverFilter). + Watches(&corev1.ServiceAccount{}, nameserverFilter). 
+ Complete(&NameserverReconciler{ + recorder: eventRecorder, + tsNamespace: tsNamespace, + + Client: mgr.GetClient(), + logger: zlog.Named("nameserver-reconciler"), + clock: tstime.DefaultClock{}, + }) + if err != nil { + startlog.Fatalf("could not create nameserver reconciler: %v", err) + } + lc, err := s.LocalClient() + if err != nil { + startlog.Fatalf("error retrieving local client: %v", err) + } + // On DNSConfig changes, reconcile all EndpointSlices in operator namespace. + dnsConfigFilter := handler.EnqueueRequestsFromMapFunc(enqueueAllEndpointSlicesInNS(tsNamespace, mgr.GetClient())) + // On Secret changes, if it has the tailscale labels and is for an + // ingress/egress proxy, reconcile the EndpointSlice for the proxy's + // headless Service. We need to watch Secrets because this is where the + // dns-records-reconciler reads the MagicDNS name from for ingress + // proxies exposed via an annotation. + epsForSecretFilter := handler.EnqueueRequestsFromMapFunc(enqueueEndpointSliceForSecret(tsNamespace, mgr.GetClient())) + // The only Service changes the dns-records-reconciler is interested in + // are changes to svc.status.loadBalancer.ingress.hostname, so only + // reconcile proxy EndpointSlices associated with LoadBalancer Services + // exposed via Tailscale. + epsForServiceFilter := handler.EnqueueRequestsFromMapFunc(enqueueEndpointSliceForService(tsNamespace, mgr.GetClient(), startlog, isDefaultLoadBalancer)) + // If a tailscale Ingress changes, reconcile the EndpointSlice for the proxy's headless Service. + epsForIngressFilter := handler.EnqueueRequestsFromMapFunc(enqueueEndpointSliceForIngress(tsNamespace, mgr.GetClient(), startlog)) + err = builder.ControllerManagedBy(mgr). + For(&discoveryv1.EndpointSlice{}). + Watches(&tsapi.DNSConfig{}, dnsConfigFilter). + Watches(&corev1.Secret{}, epsForSecretFilter). + Watches(&corev1.Service{}, epsForServiceFilter). + Watches(&networkingv1.Ingress{}, epsForIngressFilter). 
+ Complete(&dnsRecordsReconciler{ + Client: mgr.GetClient(), + tsNamespace: tsNamespace, + localAPIClient: lc, + logger: zlog.Named("dns-records-reconciler"), + isDefaultLoadBalancer: isDefaultLoadBalancer, + }) + if err != nil { + startlog.Fatalf("could not create DNS records reconciler: %v", err) } startlog.Infof("Startup complete, operator running, version: %s", version.Long()) if err := mgr.Start(signals.SetupSignalHandler()); err != nil { @@ -330,14 +390,87 @@ func managedResourceHandlerForType(typ string) handler.MapFunc { {NamespacedName: parentFromObjectLabels(o)}, } } - } +func enqueueAllEndpointSlicesInNS(ns string, cl client.Reader) handler.MapFunc { + return func(ctx context.Context, _ client.Object) []reconcile.Request { + eps := &discoveryv1.EndpointSliceList{} + if err := cl.List(ctx, eps, client.InNamespace(ns)); err != nil { + return nil + } + reqs := make([]reconcile.Request, 0) + for _, ep := range eps.Items { + reqs = append(reqs, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: ep.Namespace, Name: ep.Name}}) + } + return reqs + } +} + +func enqueueEndpointSliceForSecret(ns string, cl client.Client) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + if !isManagedByType(o, "ingress") && !isManagedByType(o, "svc") { + return nil + } + svcName := o.GetName()[:strings.LastIndexAny(o.GetName(), "-")] // secret name is -0 + eps, err := getSingleObject[discoveryv1.EndpointSlice](ctx, cl, ns, map[string]string{discoveryv1.LabelServiceName: svcName}) + if err != nil || eps == nil { + return nil + } + return []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: eps.Namespace, Name: eps.Name}}} + } +} + +func enqueueEndpointSliceForService(ns string, cl client.Client, log *zap.SugaredLogger, isDefaultLoadBalancerClass bool) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + svc, ok := o.(*corev1.Service) + if !ok { + return nil + } + if 
!hasLoadBalancerClass(svc, isDefaultLoadBalancerClass) { + return nil + } + crl := childResourceLabels(svc.Name, svc.Namespace, "svc") + return endpointSliceRequests(ctx, cl, ns, crl) + } +} + +func enqueueEndpointSliceForIngress(ns string, cl client.Client, log *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + ing, ok := o.(*networkingv1.Ingress) + if !ok { + return nil + } + if !isTailscaleIngress(ing) { + return nil + } + crl := childResourceLabels(ing.Name, ing.Namespace, "ingress") + return endpointSliceRequests(ctx, cl, ns, crl) + } +} + +func endpointSliceRequests(ctx context.Context, cl client.Client, ns string, crl map[string]string) []reconcile.Request { + // TODO (irbekrm): experiment with indexing endpoint slices in + // cache so that they can be directly filtered for a parent + // Service- this might be more efficient than filtering than + // getting the headless Service each time. + svc, err := getSingleObject[corev1.Service](ctx, cl, ns, crl) // get headless Service for proxy + if err != nil { + return nil + } + if svc == nil { + return nil + } + epsLabels := map[string]string{discoveryv1.LabelServiceName: svc.Name} + eps, err := getSingleObject[discoveryv1.EndpointSlice](ctx, cl, ns, epsLabels) + if err != nil || eps == nil { + return nil + } + return []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: eps.Namespace, Name: eps.Name}}} +} func serviceHandler(_ context.Context, o client.Object) []reconcile.Request { if isManagedByType(o, "svc") { // If this is a Service managed by a Service we want to enqueue its parent return []reconcile.Request{{NamespacedName: parentFromObjectLabels(o)}} - } if isManagedResource(o) { // If this is a Servce managed by a resource that is not a Service, we leave it alone @@ -352,12 +485,4 @@ func serviceHandler(_ context.Context, o client.Object) []reconcile.Request { }, }, } - -} - -// isMagicDNSName reports whether name is a full 
tailnet node FQDN (with or -// without final dot). -func isMagicDNSName(name string) bool { - validMagicDNSName := regexp.MustCompile(`^[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+\.ts\.net\.?$`) - return validMagicDNSName.MatchString(name) } diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index d6b810e73..c32f657fe 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -20,6 +20,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + operatorutils "tailscale.com/k8s-operator" "tailscale.com/util/clientmetric" "tailscale.com/util/set" ) @@ -80,7 +81,7 @@ func (a *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request } else if err != nil { return reconcile.Result{}, fmt.Errorf("failed to get svc: %w", err) } - targetIP := a.tailnetTargetAnnotation(svc) + targetIP := tailnetTargetAnnotation(svc) targetFQDN := svc.Annotations[AnnotationTailnetTargetFQDN] if !svc.DeletionTimestamp.IsZero() || !a.shouldExpose(svc) && targetIP == "" && targetFQDN == "" { logger.Debugf("service is being deleted or is (no longer) referring to Tailscale ingress/egress, ensuring any created resources are cleaned up") @@ -190,7 +191,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga sts.ClusterTargetIP = svc.Spec.ClusterIP a.managedIngressProxies.Add(svc.UID) gaugeIngressProxies.Set(int64(a.managedIngressProxies.Len())) - } else if ip := a.tailnetTargetAnnotation(svc); ip != "" { + } else if ip := tailnetTargetAnnotation(svc); ip != "" { sts.TailnetTargetIP = ip a.managedEgressProxies.Add(svc.UID) gaugeEgressProxies.Set(int64(a.managedEgressProxies.Len())) @@ -275,7 +276,7 @@ func validateService(svc *corev1.Service) []string { violations = append(violations, "only one of annotations %s and %s can be set", AnnotationTailnetTargetIP, AnnotationTailnetTargetFQDN) } if fqdn := svc.Annotations[AnnotationTailnetTargetFQDN]; fqdn != "" { - if 
!isMagicDNSName(fqdn) { + if !operatorutils.IsMagicDNSName(fqdn) { violations = append(violations, fmt.Sprintf("invalid value of annotation %s: %q does not appear to be a valid MagicDNS name", AnnotationTailnetTargetFQDN, fqdn)) } } @@ -289,7 +290,7 @@ func (a *ServiceReconciler) shouldExpose(svc *corev1.Service) bool { return false } - return a.hasLoadBalancerClass(svc) || a.hasExposeAnnotation(svc) + return hasLoadBalancerClass(svc, a.isDefaultLoadBalancer) || hasExposeAnnotation(svc) } func (a *ServiceReconciler) hasLoadBalancerClass(svc *corev1.Service) bool { @@ -301,7 +302,7 @@ func (a *ServiceReconciler) hasLoadBalancerClass(svc *corev1.Service) bool { // hasExposeAnnotation reports whether Service has the tailscale.com/expose // annotation set -func (a *ServiceReconciler) hasExposeAnnotation(svc *corev1.Service) bool { +func hasExposeAnnotation(svc *corev1.Service) bool { return svc != nil && svc.Annotations[AnnotationExpose] == "true" } @@ -309,7 +310,7 @@ func (a *ServiceReconciler) hasExposeAnnotation(svc *corev1.Service) bool { // annotation or of the deprecated tailscale.com/ts-tailnet-target-ip // annotation. If neither is set, it returns an empty string. If both are set, // it returns the value of the new annotation. 
-func (a *ServiceReconciler) tailnetTargetAnnotation(svc *corev1.Service) string { +func tailnetTargetAnnotation(svc *corev1.Service) string { if svc == nil { return "" } @@ -318,3 +319,10 @@ func (a *ServiceReconciler) tailnetTargetAnnotation(svc *corev1.Service) string } return svc.Annotations[annotationTailnetTargetIPOld] } + +func hasLoadBalancerClass(svc *corev1.Service, isDefaultLoadBalancer bool) bool { + return svc != nil && + svc.Spec.Type == corev1.ServiceTypeLoadBalancer && + (svc.Spec.LoadBalancerClass != nil && *svc.Spec.LoadBalancerClass == "tailscale" || + svc.Spec.LoadBalancerClass == nil && isDefaultLoadBalancer) +} diff --git a/k8s-operator/apis/v1alpha1/register.go b/k8s-operator/apis/v1alpha1/register.go index d8929a9f5..7295d42f3 100644 --- a/k8s-operator/apis/v1alpha1/register.go +++ b/k8s-operator/apis/v1alpha1/register.go @@ -49,7 +49,7 @@ func init() { // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, &Connector{}, &ConnectorList{}) + scheme.AddKnownTypes(SchemeGroupVersion, &Connector{}, &ConnectorList{}, &DNSConfig{}, &DNSConfigList{}) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go new file mode 100644 index 000000000..d0cfd352b --- /dev/null +++ b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go @@ -0,0 +1,72 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Code comments on these types should be treated as user facing documentation- +// they will appear on the DNSConfig CRD i.e if someone runs kubectl explain dnsconfig. 
+ +var DNSConfigKind = "DNSConfig" + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,shortName=dc +// +kubebuilder:printcolumn:name="NameserverIP",type="string",JSONPath=`.status.nameserverStatus.ip`,description="Status of the deployed Connector resources." + +type DNSConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DNSConfigSpec `json:"spec"` + + // +optional + Status DNSConfigStatus `json:"status"` +} + +// +kubebuilder:object:root=true + +type DNSConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []DNSConfig `json:"items"` +} + +type DNSConfigSpec struct { + Nameserver *Nameserver `json:"nameserver"` +} + +type Nameserver struct { + // +optional + Image *Image `json:"image,omitempty"` +} + +type Image struct { + // +optional + Repo string `json:"repo,omitempty"` + // +optional + Tag string `json:"tag,omitempty"` +} + +type DNSConfigStatus struct { + // TODO: rename ConnectorCondition to sth like ComponentCondition + // +listType=map + // +listMapKey=type + // +optional + Conditions []ConnectorCondition `json:"conditions"` + // +optional + NameserverStatus *NameserverStatus `json:"nameserverStatus"` +} + +type NameserverStatus struct { + // +optional + IP string `json:"ip"` +} + +const NameserverReady ConnectorConditionType = `NameserverReady` diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index f0f54b533..dc6198512 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -136,6 +136,162 @@ func (in *ConnectorStatus) DeepCopy() *ConnectorStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSConfig) DeepCopyInto(out *DNSConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSConfig. +func (in *DNSConfig) DeepCopy() *DNSConfig { + if in == nil { + return nil + } + out := new(DNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSConfigList) DeepCopyInto(out *DNSConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNSConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSConfigList. +func (in *DNSConfigList) DeepCopy() *DNSConfigList { + if in == nil { + return nil + } + out := new(DNSConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSConfigSpec) DeepCopyInto(out *DNSConfigSpec) { + *out = *in + if in.Nameserver != nil { + in, out := &in.Nameserver, &out.Nameserver + *out = new(Nameserver) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSConfigSpec. +func (in *DNSConfigSpec) DeepCopy() *DNSConfigSpec { + if in == nil { + return nil + } + out := new(DNSConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSConfigStatus) DeepCopyInto(out *DNSConfigStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ConnectorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NameserverStatus != nil { + in, out := &in.NameserverStatus, &out.NameserverStatus + *out = new(NameserverStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSConfigStatus. +func (in *DNSConfigStatus) DeepCopy() *DNSConfigStatus { + if in == nil { + return nil + } + out := new(DNSConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Nameserver) DeepCopyInto(out *Nameserver) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Nameserver. +func (in *Nameserver) DeepCopy() *Nameserver { + if in == nil { + return nil + } + out := new(Nameserver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NameserverStatus) DeepCopyInto(out *NameserverStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameserverStatus. +func (in *NameserverStatus) DeepCopy() *NameserverStatus { + if in == nil { + return nil + } + out := new(NameserverStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in Routes) DeepCopyInto(out *Routes) { { diff --git a/k8s-operator/conditions.go b/k8s-operator/conditions.go index f1344e34c..65d292a36 100644 --- a/k8s-operator/conditions.go +++ b/k8s-operator/conditions.go @@ -19,6 +19,26 @@ import ( // given attributes. 
LastTransitionTime gets set every time condition's status // changes func SetConnectorCondition(cn *tsapi.Connector, conditionType tsapi.ConnectorConditionType, status metav1.ConditionStatus, reason, message string, gen int64, clock tstime.Clock, logger *zap.SugaredLogger) { + conds := updateCondition(cn.Status.Conditions, conditionType, status, reason, message, gen, clock, logger) + cn.Status.Conditions = conds +} + +// RemoveConnectorCondition will remove condition of the given type +func RemoveConnectorCondition(conn *tsapi.Connector, conditionType tsapi.ConnectorConditionType) { + conn.Status.Conditions = slices.DeleteFunc(conn.Status.Conditions, func(cond tsapi.ConnectorCondition) bool { + return cond.Type == conditionType + }) +} + +// SetDNSConfigCondition ensures that DNSConfig status has a condition with the +// given attributes. LastTransitionTime gets set every time condition's status +// changes +func SetDNSConfigCondition(dnsCfg *tsapi.DNSConfig, conditionType tsapi.ConnectorConditionType, status metav1.ConditionStatus, reason, message string, gen int64, clock tstime.Clock, logger *zap.SugaredLogger) { + conds := updateCondition(dnsCfg.Status.Conditions, conditionType, status, reason, message, gen, clock, logger) + dnsCfg.Status.Conditions = conds +} + +func updateCondition(conds []tsapi.ConnectorCondition, conditionType tsapi.ConnectorConditionType, status metav1.ConditionStatus, reason, message string, gen int64, clock tstime.Clock, logger *zap.SugaredLogger) []tsapi.ConnectorCondition { newCondition := tsapi.ConnectorCondition{ Type: conditionType, Status: status, @@ -30,31 +50,34 @@ func SetConnectorCondition(cn *tsapi.Connector, conditionType tsapi.ConnectorCon nowTime := metav1.NewTime(clock.Now()) newCondition.LastTransitionTime = &nowTime - idx := xslices.IndexFunc(cn.Status.Conditions, func(cond tsapi.ConnectorCondition) bool { + idx := xslices.IndexFunc(conds, func(cond tsapi.ConnectorCondition) bool { return cond.Type == conditionType }) if 
idx == -1 { - cn.Status.Conditions = append(cn.Status.Conditions, newCondition) - return + conds = append(conds, newCondition) + return conds } // Update the existing condition - cond := cn.Status.Conditions[idx] + cond := conds[idx] // If this update doesn't contain a state transition, we don't update // the conditions LastTransitionTime to Now() if cond.Status == status { newCondition.LastTransitionTime = cond.LastTransitionTime } else { - logger.Info("Status change for Connector condition %s from %s to %s", conditionType, cond.Status, status) + logger.Info("Status change for condition %s from %s to %s", conditionType, cond.Status, status) } - - cn.Status.Conditions[idx] = newCondition + return conds } -// RemoveConnectorCondition will remove condition of the given type -func RemoveConnectorCondition(conn *tsapi.Connector, conditionType tsapi.ConnectorConditionType) { - conn.Status.Conditions = slices.DeleteFunc(conn.Status.Conditions, func(cond tsapi.ConnectorCondition) bool { - return cond.Type == conditionType +func DNSCfgIsReady(cfg *tsapi.DNSConfig) bool { + idx := xslices.IndexFunc(cfg.Status.Conditions, func(cond tsapi.ConnectorCondition) bool { + return cond.Type == tsapi.NameserverReady }) + if idx == -1 { + return false + } + cond := cfg.Status.Conditions[idx] + return cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == cfg.Generation } diff --git a/k8s-operator/tsdns.go b/k8s-operator/tsdns.go new file mode 100644 index 000000000..1d80ca699 --- /dev/null +++ b/k8s-operator/tsdns.go @@ -0,0 +1,48 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package kube + +import ( + "encoding/json" + "fmt" + "net/netip" + + "tailscale.com/types/logger" + "tailscale.com/util/dnsname" +) + +// TSHosts is a mapping of MagicDNS names to a list IPv4 or IPv6 addresses. 
+type TSHosts struct { + Hosts map[string][]string `json:"hosts"` +} + +func NewTSHosts(bs []byte, log logger.Logf) (*TSHosts, error) { + cfg := &TSHosts{} + if err := json.Unmarshal(bs, cfg); err != nil { + return nil, fmt.Errorf("error unmarshaling json bytes: %w", err) + } + // Validate the unmarshalled Hosts entries. In case of an invalid entry, + // delete it and log an error, but do not invalidate the result. + for key, val := range cfg.Hosts { + fqdn, err := dnsname.ToFQDN(key) + if err != nil { + log("error parsing DNS name %s: %v, skipping", key, err) + delete(cfg.Hosts, key) + break + } + if !IsMagicDNSName(string(fqdn)) { + log("DNS name %s is not a MagicDNS name, skipping", fqdn) + delete(cfg.Hosts, key) + break + } + for _, ip := range val { + if _, err := netip.ParseAddr(ip); err != nil { + log("IP %s is not a valid IP address, skipping", ip) + } + } + } + return cfg, nil +} diff --git a/k8s-operator/utils.go b/k8s-operator/utils.go new file mode 100644 index 000000000..805765165 --- /dev/null +++ b/k8s-operator/utils.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package kube + +import "regexp" + +// isMagicDNSName reports whether name is a full tailnet node FQDN (with or +// without final dot). +func IsMagicDNSName(name string) bool { + validMagicDNSName := regexp.MustCompile(`^[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+\.ts\.net\.?$`) + return validMagicDNSName.MatchString(name) +}