From 05b3ef26fd24f13084df6aaafec4625401e7ae03 Mon Sep 17 00:00:00 2001 From: David Bond Date: Mon, 16 Mar 2026 21:34:35 +0000 Subject: [PATCH] cmd/k8s-operator: migrate to tailscale-client-go-v2 This commit modifies the kubernetes operator to use the `tailscale-client-go-v2` package instead of the internal tailscale client it was previously using. This now gives us the ability to expand out custom resources and features as they become available via the API module. The tailnet reconciler has also been modified to manage clients as tailnets are created and removed, providing each subsequent reconciler with a single `ClientProvider` that obtains a tailscale client for the respective tailnet by name, or the operator's default when presented with a blank string. Fixes: https://github.com/tailscale/corp/issues/38418 Signed-off-by: David Bond --- cmd/containerboot/main.go | 3 +- cmd/k8s-operator/api-server-proxy-pg.go | 62 ++--- cmd/k8s-operator/api-server-proxy-pg_test.go | 38 +-- cmd/k8s-operator/connector_test.go | 10 +- cmd/k8s-operator/depaware.txt | 6 +- cmd/k8s-operator/e2e/setup.go | 97 ++++---- cmd/k8s-operator/ingress-for-pg.go | 73 +++--- cmd/k8s-operator/ingress-for-pg_test.go | 29 +-- cmd/k8s-operator/ingress_test.go | 23 +- cmd/k8s-operator/operator.go | 73 +++--- cmd/k8s-operator/operator_test.go | 82 +++---- cmd/k8s-operator/proxygroup.go | 91 +++---- cmd/k8s-operator/proxygroup_test.go | 43 ++-- cmd/k8s-operator/sts.go | 187 +++++++-------- cmd/k8s-operator/svc-for-pg.go | 82 ++++--- cmd/k8s-operator/svc-for-pg_test.go | 28 ++- cmd/k8s-operator/svc_test.go | 4 +- cmd/k8s-operator/tailnet.go | 71 ------ cmd/k8s-operator/testutils_test.go | 226 ++++++++++-------- cmd/k8s-operator/tsclient.go | 71 +++--- cmd/k8s-operator/tsclient_test.go | 135 ----------- cmd/k8s-operator/tsrecorder.go | 80 +++---- cmd/k8s-operator/tsrecorder_test.go | 26 +- flake.nix | 2 +- go.mod | 5 +- go.mod.sri | 2 +- go.sum | 10 +- k8s-operator/reconciler/tailnet/mocks_test.go | 50 
+++- k8s-operator/reconciler/tailnet/tailnet.go | 70 +++--- .../reconciler/tailnet/tailnet_test.go | 12 +- k8s-operator/tsclient/client.go | 70 ++++++ k8s-operator/tsclient/provider.go | 67 ++++++ shell.nix | 2 +- 33 files changed, 894 insertions(+), 936 deletions(-) delete mode 100644 cmd/k8s-operator/tailnet.go delete mode 100644 cmd/k8s-operator/tsclient_test.go create mode 100644 k8s-operator/tsclient/client.go create mode 100644 k8s-operator/tsclient/provider.go diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 76c6e910a..708785747 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -136,7 +136,7 @@ import ( "time" "golang.org/x/sys/unix" - "tailscale.com/client/tailscale" + "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/conffile" @@ -173,7 +173,6 @@ func main() { func run() error { log.SetPrefix("boot: ") - tailscale.I_Acknowledge_This_API_Is_Unstable = true cfg, err := configFromEnv() if err != nil { diff --git a/cmd/k8s-operator/api-server-proxy-pg.go b/cmd/k8s-operator/api-server-proxy-pg.go index 0900fd0aa..ac2f73577 100644 --- a/cmd/k8s-operator/api-server-proxy-pg.go +++ b/cmd/k8s-operator/api-server-proxy-pg.go @@ -23,10 +23,11 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale/v2" - "tailscale.com/internal/client/tailscale" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/kube/k8s-proxy/conf" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" @@ -51,7 +52,7 @@ type KubeAPIServerTSServiceReconciler struct { client.Client recorder record.EventRecorder logger *zap.SugaredLogger - tsClient tsClient + clients ClientProvider tsNamespace string defaultTags []string operatorID string // stableID of the operator's Tailscale device @@ -77,15 +78,14 @@ func (r 
*KubeAPIServerTSServiceReconciler) Reconcile(ctx context.Context, req re serviceName := serviceNameForAPIServerProxy(pg) logger = logger.With("Tailscale Service", serviceName) - - tailscaleClient, err := r.getClient(ctx, pg.Spec.Tailnet) + tsClient, err := r.clients.For(pg.Spec.Tailnet) if err != nil { return res, fmt.Errorf("failed to get tailscale client: %w", err) } if markedForDeletion(pg) { logger.Debugf("ProxyGroup is being deleted, ensuring any created resources are cleaned up") - if err = r.maybeCleanup(ctx, serviceName, pg, logger, tailscaleClient); err != nil && strings.Contains(err.Error(), optimisticLockErrorMsg) { + if err = r.maybeCleanup(ctx, serviceName, pg, logger, tsClient); err != nil && strings.Contains(err.Error(), optimisticLockErrorMsg) { logger.Infof("optimistic lock error, retrying: %s", err) return res, nil } @@ -93,7 +93,7 @@ func (r *KubeAPIServerTSServiceReconciler) Reconcile(ctx context.Context, req re return res, err } - err = r.maybeProvision(ctx, serviceName, pg, logger, tailscaleClient) + err = r.maybeProvision(ctx, serviceName, pg, logger, tsClient) if err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { logger.Infof("optimistic lock error, retrying: %s", err) @@ -105,31 +105,15 @@ func (r *KubeAPIServerTSServiceReconciler) Reconcile(ctx context.Context, req re return reconcile.Result{}, nil } -// getClient returns the appropriate Tailscale client for the given tailnet. -// If no tailnet is specified, returns the default client. -func (r *KubeAPIServerTSServiceReconciler) getClient(ctx context.Context, tailnetName string) (tsClient, - error) { - if tailnetName == "" { - return r.tsClient, nil - } - - tc, _, err := clientForTailnet(ctx, r.Client, r.tsNamespace, tailnetName) - if err != nil { - return nil, err - } - - return tc, nil -} - // maybeProvision ensures that a Tailscale Service for this ProxyGroup exists // and is up to date. // // Returns true if the operation resulted in a Tailscale Service update. 
-func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, tsClient tsClient) (err error) { +func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, tsClient tsclient.Client) (err error) { var dnsName string oldPGStatus := pg.Status.DeepCopy() defer func() { - podsAdvertising, podsErr := numberPodsAdvertising(ctx, r.Client, r.tsNamespace, pg.Name, serviceName) + podsAdvertising, podsErr := numberPodsAdvertising(ctx, r.Client, r.tsNamespace, pg.Name, serviceName.String()) if podsErr != nil { err = errors.Join(err, fmt.Errorf("failed to get number of advertised Pods: %w", podsErr)) // Continue, updating the status with the best available information. @@ -177,8 +161,8 @@ func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, s // 1. Check there isn't a Tailscale Service with the same hostname // already created and not owned by this ProxyGroup. - existingTSSvc, err := tsClient.GetVIPService(ctx, serviceName) - if err != nil && !isErrorTailscaleServiceNotFound(err) { + existingTSSvc, err := tsClient.VIPServices().Get(ctx, serviceName.String()) + if err != nil && !tailscale.IsNotFound(err) { return fmt.Errorf("error getting Tailscale Service %q: %w", serviceName, err) } @@ -202,8 +186,8 @@ func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, s serviceTags = pg.Spec.Tags.Stringify() } - tsSvc := &tailscale.VIPService{ - Name: serviceName, + tsSvc := tailscale.VIPService{ + Name: serviceName.String(), Tags: serviceTags, Ports: []string{"tcp:443"}, Comment: managedTSServiceComment, @@ -216,10 +200,10 @@ func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, s // 2. Ensure the Tailscale Service exists and is up to date. 
if existingTSSvc == nil || !slices.Equal(tsSvc.Tags, existingTSSvc.Tags) || - !ownersAreSetAndEqual(tsSvc, existingTSSvc) || + !ownersAreSetAndEqual(tsSvc, *existingTSSvc) || !slices.Equal(tsSvc.Ports, existingTSSvc.Ports) { logger.Infof("Ensuring Tailscale Service exists and is up to date") - if err = tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { + if err = tsClient.VIPServices().CreateOrUpdate(ctx, tsSvc); err != nil { return fmt.Errorf("error creating Tailscale Service: %w", err) } } @@ -248,10 +232,10 @@ func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, s } // maybeCleanup ensures that any resources, such as a Tailscale Service created for this Service, are cleaned up when the -// Service is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the Tailscale Service is only +// Service is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup. The Tailscale Service is only // deleted if it does not contain any other owner references. If it does, the cleanup only removes the owner reference // corresponding to this Service. 
-func (r *KubeAPIServerTSServiceReconciler) maybeCleanup(ctx context.Context, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, tsClient tsClient) (err error) { +func (r *KubeAPIServerTSServiceReconciler) maybeCleanup(ctx context.Context, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, client tsclient.Client) (err error) { ix := slices.Index(pg.Finalizers, proxyPGFinalizerName) if ix < 0 { logger.Debugf("no finalizer, nothing to do") @@ -265,7 +249,7 @@ func (r *KubeAPIServerTSServiceReconciler) maybeCleanup(ctx context.Context, ser } }() - if _, err = cleanupTailscaleService(ctx, tsClient, serviceName, r.operatorID, logger); err != nil { + if _, err = cleanupTailscaleService(ctx, client, serviceName.String(), r.operatorID, logger); err != nil { return fmt.Errorf("error deleting Tailscale Service: %w", err) } @@ -278,16 +262,16 @@ func (r *KubeAPIServerTSServiceReconciler) maybeCleanup(ctx context.Context, ser // maybeDeleteStaleServices deletes Services that have previously been created for // this ProxyGroup but are no longer needed. -func (r *KubeAPIServerTSServiceReconciler) maybeDeleteStaleServices(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, tsClient tsClient) error { +func (r *KubeAPIServerTSServiceReconciler) maybeDeleteStaleServices(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, tsClient tsclient.Client) error { serviceName := serviceNameForAPIServerProxy(pg) - svcs, err := tsClient.ListVIPServices(ctx) + svcs, err := tsClient.VIPServices().List(ctx) if err != nil { return fmt.Errorf("error listing Tailscale Services: %w", err) } - for _, svc := range svcs.VIPServices { - if svc.Name == serviceName { + for _, svc := range svcs { + if svc.Name == serviceName.String() { continue } @@ -306,11 +290,11 @@ func (r *KubeAPIServerTSServiceReconciler) maybeDeleteStaleServices(ctx context. 
} logger.Infof("Deleting Tailscale Service %s", svc.Name) - if err = tsClient.DeleteVIPService(ctx, svc.Name); err != nil && !isErrorTailscaleServiceNotFound(err) { + if err = tsClient.VIPServices().Delete(ctx, svc.Name); err != nil && !tailscale.IsNotFound(err) { return fmt.Errorf("error deleting Tailscale Service %s: %w", svc.Name, err) } - if err = cleanupCertResources(ctx, r.Client, r.tsNamespace, svc.Name, pg); err != nil { + if err = cleanupCertResources(ctx, r.Client, r.tsNamespace, tailcfg.ServiceName(svc.Name), pg); err != nil { return fmt.Errorf("failed to clean up cert resources: %w", err) } } diff --git a/cmd/k8s-operator/api-server-proxy-pg_test.go b/cmd/k8s-operator/api-server-proxy-pg_test.go index 52dda93e5..889ef064b 100644 --- a/cmd/k8s-operator/api-server-proxy-pg_test.go +++ b/cmd/k8s-operator/api-server-proxy-pg_test.go @@ -16,10 +16,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "tailscale.com/client/tailscale/v2" - "tailscale.com/internal/client/tailscale" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/kube/k8s-proxy/conf" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" @@ -93,8 +94,10 @@ func TestAPIServerProxyReconciler(t *testing.T) { expectEqual(t, fc, pgCfgSecret) } - ft := &fakeTSClient{} - ingressTSSvc := &tailscale.VIPService{ + ft := &fakeTSClient{ + vipServices: make(map[string]tailscale.VIPService), + } + ingressTSSvc := tailscale.VIPService{ Name: "svc:some-ingress-hostname", Comment: managedTSServiceComment, Annotations: map[string]string{ @@ -105,11 +108,11 @@ func TestAPIServerProxyReconciler(t *testing.T) { Tags: []string{"tag:k8s"}, Addrs: []string{"5.6.7.8"}, } - ft.CreateOrUpdateVIPService(t.Context(), ingressTSSvc) + ft.VIPServices().CreateOrUpdate(t.Context(), ingressTSSvc) r := &KubeAPIServerTSServiceReconciler{ Client: fc, 
- tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, tsNamespace: ns, logger: zap.Must(zap.NewDevelopment()).Sugar(), @@ -119,7 +122,7 @@ func TestAPIServerProxyReconciler(t *testing.T) { } // Create a Tailscale Service that will conflict with the initial config. - if err := ft.CreateOrUpdateVIPService(t.Context(), &tailscale.VIPService{ + if err := ft.VIPServices().CreateOrUpdate(t.Context(), tailscale.VIPService{ Name: "svc:" + pgName, }); err != nil { t.Fatalf("creating initial Tailscale Service: %v", err) @@ -135,7 +138,7 @@ func TestAPIServerProxyReconciler(t *testing.T) { expectEqual(t, fc, pgCfgSecret) // Unchanged. // Delete Tailscale Service; should see Service created and valid condition updated to true. - if err := ft.DeleteVIPService(t.Context(), "svc:"+pgName); err != nil { + if err := ft.VIPServices().Delete(t.Context(), "svc:"+pgName); err != nil { t.Fatalf("deleting initial Tailscale Service: %v", err) } @@ -154,7 +157,7 @@ func TestAPIServerProxyReconciler(t *testing.T) { expectReconciled(t, r, "", pgName) - tsSvc, err := ft.GetVIPService(t.Context(), "svc:"+pgName) + tsSvc, err := ft.VIPServices().Get(t.Context(), "svc:"+pgName) if err != nil { t.Fatalf("getting Tailscale Service: %v", err) } @@ -223,15 +226,15 @@ func TestAPIServerProxyReconciler(t *testing.T) { p.Spec.KubeAPIServer = pg.Spec.KubeAPIServer }) expectReconciled(t, r, "", pgName) - _, err = ft.GetVIPService(t.Context(), "svc:"+pgName) - if !isErrorTailscaleServiceNotFound(err) { + _, err = ft.VIPServices().Get(t.Context(), "svc:"+pgName) + if !tailscale.IsNotFound(err) { t.Fatalf("Expected 404, got: %v", err) } - tsSvc, err = ft.GetVIPService(t.Context(), updatedServiceName) + tsSvc, err = ft.VIPServices().Get(t.Context(), updatedServiceName.String()) if err != nil { t.Fatalf("Expected renamed svc, got error: %v", err) } - expectedTSSvc.Name = updatedServiceName + expectedTSSvc.Name = updatedServiceName.String() if !reflect.DeepEqual(tsSvc, 
expectedTSSvc) { t.Fatalf("expected Tailscale Service to be %+v, got %+v", expectedTSSvc, tsSvc) } @@ -269,17 +272,17 @@ func TestAPIServerProxyReconciler(t *testing.T) { expectMissing[corev1.Secret](t, fc, ns, updatedDomain) expectMissing[rbacv1.Role](t, fc, ns, updatedDomain) expectMissing[rbacv1.RoleBinding](t, fc, ns, updatedDomain) - _, err = ft.GetVIPService(t.Context(), updatedServiceName) - if !isErrorTailscaleServiceNotFound(err) { + _, err = ft.VIPServices().Get(t.Context(), updatedServiceName.String()) + if !tailscale.IsNotFound(err) { t.Fatalf("Expected 404, got: %v", err) } // Ingress Tailscale Service should not be affected. - svc, err := ft.GetVIPService(t.Context(), ingressTSSvc.Name) + svc, err := ft.VIPServices().Get(t.Context(), ingressTSSvc.Name) if err != nil { t.Fatalf("getting ingress Tailscale Service: %v", err) } - if !reflect.DeepEqual(svc, ingressTSSvc) { + if !reflect.DeepEqual(svc, &ingressTSSvc) { t.Fatalf("expected ingress Tailscale Service to be unmodified %+v, got %+v", ingressTSSvc, svc) } } @@ -292,8 +295,7 @@ func TestExclusiveOwnerAnnotations(t *testing.T) { }, } const ( - selfOperatorID = "self-id" - pg1Owner = `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"pg1","uid":"pg1-uid"}}]}` + pg1Owner = `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"pg1","uid":"pg1-uid"}}]}` ) for name, tc := range map[string]struct { diff --git a/cmd/k8s-operator/connector_test.go b/cmd/k8s-operator/connector_test.go index 110ad1bf1..69e8e287d 100644 --- a/cmd/k8s-operator/connector_test.go +++ b/cmd/k8s-operator/connector_test.go @@ -19,7 +19,9 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client/fake" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/kube/kubetypes" "tailscale.com/tstest" "tailscale.com/util/mak" @@ -62,7 +64,7 @@ func TestConnector(t *testing.T) { 
recorder: record.NewFakeRecorder(10), ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -252,7 +254,7 @@ func TestConnectorWithProxyClass(t *testing.T) { clock: cl, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -346,7 +348,7 @@ func TestConnectorWithAppConnector(t *testing.T) { clock: cl, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -446,7 +448,7 @@ func TestConnectorWithMultipleReplicas(t *testing.T) { clock: cl, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index cbb4738d7..beddd0544 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -784,8 +784,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/cmd/k8s-operator+ + tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ + tailscale.com/client/tailscale/v2 from tailscale.com/cmd/k8s-operator+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from 
tailscale.com/ipn/ipnlocal+ @@ -816,7 +817,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ - tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator+ + tailscale.com/internal/client/tailscale from tailscale.com/feature/identityfederation+ tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ @@ -839,6 +840,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/k8s-operator/sessionrecording/spdy from tailscale.com/k8s-operator/sessionrecording tailscale.com/k8s-operator/sessionrecording/tsrecorder from tailscale.com/k8s-operator/sessionrecording+ tailscale.com/k8s-operator/sessionrecording/ws from tailscale.com/k8s-operator/sessionrecording + tailscale.com/k8s-operator/tsclient from tailscale.com/cmd/k8s-operator+ tailscale.com/kube/egressservices from tailscale.com/cmd/k8s-operator tailscale.com/kube/ingressservices from tailscale.com/cmd/k8s-operator tailscale.com/kube/k8s-proxy/conf from tailscale.com/cmd/k8s-operator diff --git a/cmd/k8s-operator/e2e/setup.go b/cmd/k8s-operator/e2e/setup.go index e3d7ed89b..8171dd036 100644 --- a/cmd/k8s-operator/e2e/setup.go +++ b/cmd/k8s-operator/e2e/setup.go @@ -4,7 +4,6 @@ package e2e import ( - "bytes" "context" "crypto/rand" "crypto/tls" @@ -53,12 +52,13 @@ import ( "sigs.k8s.io/kind/pkg/cluster/nodeutils" "sigs.k8s.io/kind/pkg/cmd" - "tailscale.com/internal/client/tailscale" + "tailscale.com/client/tailscale/v2" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/tsnet" + "tailscale.com/util/must" ) const ( @@ -106,7 +106,8 @@ 
func runTests(m *testing.M) (int, error) { if err != nil { return 0, err } - if err := os.MkdirAll(tmp, 0755); err != nil { + + if err = os.MkdirAll(tmp, 0755); err != nil { return 0, fmt.Errorf("failed to create temp dir: %w", err) } @@ -122,10 +123,12 @@ func runTests(m *testing.M) (int, error) { kindProvider = cluster.NewProvider( cluster.ProviderWithLogger(cmd.NewLogger()), ) + clusters, err := kindProvider.List() if err != nil { return 0, fmt.Errorf("failed to list kind clusters: %w", err) } + if !slices.Contains(clusters, kindClusterName) { if err := kindProvider.Create(kindClusterName, cluster.CreateWithWaitForReady(5*time.Minute), @@ -147,6 +150,7 @@ func runTests(m *testing.M) (int, error) { if err != nil { return 0, fmt.Errorf("error loading kubeconfig: %w", err) } + kubeClient, err = client.NewWithWatch(restCfg, client.Options{Scheme: tsapi.GlobalScheme}) if err != nil { return 0, fmt.Errorf("error creating Kubernetes client: %w", err) @@ -157,24 +161,28 @@ func runTests(m *testing.M) (int, error) { clientID, clientSecret string // OAuth client for the operator to use. caPaths []string // Extra CA cert file paths to add to images. - certsDir string = filepath.Join(tmp, "certs") // Directory containing extra CA certs to add to images. + certsDir = filepath.Join(tmp, "certs") // Directory containing extra CA certs to add to images. ) if *fDevcontrol { // Deploy pebble and get its certs. 
- if err := applyPebbleResources(ctx, kubeClient); err != nil { + if err = applyPebbleResources(ctx, kubeClient); err != nil { return 0, fmt.Errorf("failed to apply pebble resources: %w", err) } + pebblePod, err := waitForPodReady(ctx, logger, kubeClient, ns, client.MatchingLabels{"app": "pebble"}) if err != nil { return 0, fmt.Errorf("pebble pod not ready: %w", err) } - if err := forwardLocalPortToPod(ctx, logger, restCfg, ns, pebblePod, 15000); err != nil { + + if err = forwardLocalPortToPod(ctx, logger, restCfg, ns, pebblePod, 15000); err != nil { return 0, fmt.Errorf("failed to set up port forwarding to pebble: %w", err) } + testCAs = x509.NewCertPool() if ok := testCAs.AppendCertsFromPEM(pebbleMiniCACert); !ok { return 0, fmt.Errorf("failed to parse pebble minica cert") } + var pebbleCAChain []byte for _, path := range []string{"/intermediates/0", "/roots/0"} { pem, err := pebbleGet(ctx, 15000, path) @@ -183,20 +191,25 @@ func runTests(m *testing.M) (int, error) { } pebbleCAChain = append(pebbleCAChain, pem...) 
} + if ok := testCAs.AppendCertsFromPEM(pebbleCAChain); !ok { return 0, fmt.Errorf("failed to parse pebble ca chain cert") } - if err := os.MkdirAll(certsDir, 0755); err != nil { + + if err = os.MkdirAll(certsDir, 0755); err != nil { return 0, fmt.Errorf("failed to create certs dir: %w", err) } + pebbleCAChainPath := filepath.Join(certsDir, "pebble-ca-chain.crt") - if err := os.WriteFile(pebbleCAChainPath, pebbleCAChain, 0644); err != nil { + if err = os.WriteFile(pebbleCAChainPath, pebbleCAChain, 0644); err != nil { return 0, fmt.Errorf("failed to write pebble CA chain: %w", err) } + pebbleMiniCACertPath := filepath.Join(certsDir, "pebble.minica.crt") - if err := os.WriteFile(pebbleMiniCACertPath, pebbleMiniCACert, 0644); err != nil { + if err = os.WriteFile(pebbleMiniCACertPath, pebbleMiniCACert, 0644); err != nil { return 0, fmt.Errorf("failed to write pebble minica: %w", err) } + caPaths = []string{pebbleCAChainPath, pebbleMiniCACertPath} if !*fSkipCleanup { defer os.RemoveAll(certsDir) @@ -210,13 +223,15 @@ func runTests(m *testing.M) (int, error) { // For Pods -> devcontrol (tailscale clients joining the tailnet): // * Create ssh-server Deployment in cluster. // * Create reverse ssh tunnel that goes from ssh-server port 31544 to localhost:31544. 
- if err := forwardLocalPortToPod(ctx, logger, restCfg, ns, pebblePod, 8055); err != nil { + if err = forwardLocalPortToPod(ctx, logger, restCfg, ns, pebblePod, 8055); err != nil { return 0, fmt.Errorf("failed to set up port forwarding to pebble: %w", err) } + privateKey, publicKey, err := readOrGenerateSSHKey(tmp) if err != nil { return 0, fmt.Errorf("failed to read or generate SSH key: %w", err) } + if !*fSkipCleanup { defer os.Remove(privateKeyPath) } @@ -225,6 +240,7 @@ func runTests(m *testing.M) (int, error) { if err != nil { return 0, fmt.Errorf("failed to set up cluster->devcontrol connection: %w", err) } + if !*fSkipCleanup { defer func() { if err := cleanupSSHResources(context.Background(), kubeClient); err != nil { @@ -245,7 +261,7 @@ func runTests(m *testing.M) (int, error) { var apiKeyData struct { APIKey string `json:"apiKey"` } - if err := json.Unmarshal(b, &apiKeyData); err != nil { + if err = json.Unmarshal(b, &apiKeyData); err != nil { return 0, fmt.Errorf("failed to parse api-key.json: %w", err) } if apiKeyData.APIKey == "" { @@ -253,48 +269,27 @@ func runTests(m *testing.M) (int, error) { } // Finish setting up tsClient. - tsClient = tailscale.NewClient("-", tailscale.APIKey(apiKeyData.APIKey)) - tsClient.BaseURL = "http://localhost:31544" + tsClient = &tailscale.Client{ + APIKey: apiKeyData.APIKey, + BaseURL: must.Get(url.Parse("http://localhost:31544")), + } // Set ACLs and create OAuth client. 
- req, _ := http.NewRequest("POST", tsClient.BuildTailnetURL("acl"), bytes.NewReader(requiredACLs)) - resp, err := tsClient.Do(req) - if err != nil { - return 0, fmt.Errorf("failed to set ACLs: %w", err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - b, _ := io.ReadAll(resp.Body) - return 0, fmt.Errorf("HTTP %d setting ACLs: %s", resp.StatusCode, string(b)) + if err = tsClient.PolicyFile().Set(ctx, string(requiredACLs), ""); err != nil { + return 0, fmt.Errorf("failed to set policy file: %w", err) } + logger.Infof("ACLs configured") - reqBody, err := json.Marshal(map[string]any{ - "keyType": "client", - "scopes": []string{"auth_keys", "devices:core", "services"}, - "tags": []string{"tag:k8s-operator"}, - "description": "k8s-operator client for e2e tests", + key, err := tsClient.Keys().CreateOAuthClient(ctx, tailscale.CreateOAuthClientRequest{ + Scopes: []string{"auth_keys", "devices:core", "services"}, + Tags: []string{"tag:k8s-operator"}, + Description: "k8s-operator client for e2e tests", }) if err != nil { return 0, fmt.Errorf("failed to marshal OAuth client creation request: %w", err) } - req, _ = http.NewRequest("POST", tsClient.BuildTailnetURL("keys"), bytes.NewReader(reqBody)) - resp, err = tsClient.Do(req) - if err != nil { - return 0, fmt.Errorf("failed to create OAuth client: %w", err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - b, _ := io.ReadAll(resp.Body) - return 0, fmt.Errorf("HTTP %d creating OAuth client: %s", resp.StatusCode, string(b)) - } - var key struct { - ID string `json:"id"` - Key string `json:"key"` - } - if err := json.NewDecoder(resp.Body).Decode(&key); err != nil { - return 0, fmt.Errorf("failed to decode OAuth client creation response: %w", err) - } + clientID = key.ID clientSecret = key.Key } else { @@ -320,8 +315,10 @@ func runTests(m *testing.M) (int, error) { } // An access token will last for an hour which is plenty of time for // the tests to run. 
No need for token refresh logic. - tsClient = tailscale.NewClient("-", tailscale.APIKey(tk.AccessToken)) - tsClient.BaseURL = "http://localhost:31544" + tsClient = &tailscale.Client{ + APIKey: tk.AccessToken, + BaseURL: must.Get(url.Parse("http://localhost:31544")), + } } var ossTag string @@ -448,18 +445,18 @@ func runTests(m *testing.M) (int, error) { caps.Devices.Create.Ephemeral = true caps.Devices.Create.Tags = []string{"tag:k8s"} - authKey, authKeyMeta, err := tsClient.CreateKey(ctx, caps) + authKey, err := tsClient.Keys().CreateAuthKey(ctx, tailscale.CreateKeyRequest{Capabilities: caps}) if err != nil { return 0, err } - defer tsClient.DeleteKey(context.Background(), authKeyMeta.ID) + defer tsClient.Keys().Delete(context.Background(), authKey.ID) tnClient = &tsnet.Server{ - ControlURL: tsClient.BaseURL, + ControlURL: tsClient.BaseURL.String(), Hostname: "test-proxy", Ephemeral: true, Store: &mem.Store{}, - AuthKey: authKey, + AuthKey: authKey.Key, } _, err = tnClient.Up(ctx) if err != nil { diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 28a836e97..37d0ed014 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -12,7 +12,6 @@ import ( "fmt" "maps" "math/rand/v2" - "net/http" "reflect" "slices" "strings" @@ -30,11 +29,12 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale/v2" - "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" "tailscale.com/util/clientmetric" @@ -64,7 +64,7 @@ type HAIngressReconciler struct { recorder record.EventRecorder logger *zap.SugaredLogger - tsClient tsClient + clients ClientProvider tsnetServer tsnetServer tsNamespace string defaultTags []string 
@@ -127,7 +127,7 @@ func (r *HAIngressReconciler) Reconcile(ctx context.Context, req reconcile.Reque return res, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) } - tailscaleClient, err := clientFromProxyGroup(ctx, r.Client, pg, r.tsNamespace, r.tsClient) + tsClient, err := r.clients.For(pg.Spec.Tailnet) if err != nil { return res, fmt.Errorf("failed to get tailscale client: %w", err) } @@ -139,9 +139,9 @@ func (r *HAIngressReconciler) Reconcile(ctx context.Context, req reconcile.Reque // resulted in another actor overwriting our Tailscale Service update. needsRequeue := false if !ing.DeletionTimestamp.IsZero() || !r.shouldExpose(ing) { - needsRequeue, err = r.maybeCleanup(ctx, hostname, ing, logger, tailscaleClient, pg) + needsRequeue, err = r.maybeCleanup(ctx, hostname, ing, logger, tsClient, pg) } else { - needsRequeue, err = r.maybeProvision(ctx, hostname, ing, logger, tailscaleClient, pg) + needsRequeue, err = r.maybeProvision(ctx, hostname, ing, logger, tsClient, pg) } if err != nil { return res, err @@ -160,12 +160,12 @@ func (r *HAIngressReconciler) Reconcile(ctx context.Context, req reconcile.Reque // If a Tailscale Service exists, but does not have an owner reference from any operator, we error // out assuming that this is an owner reference created by an unknown actor. // Returns true if the operation resulted in a Tailscale Service update. -func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger, tsClient tsClient, pg *tsapi.ProxyGroup) (svcsChanged bool, err error) { +func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger, tsClient tsclient.Client, pg *tsapi.ProxyGroup) (svcsChanged bool, err error) { // Currently (2025-05) Tailscale Services are behind an alpha feature flag that // needs to be explicitly enabled for a tailnet to be able to use them. 
serviceName := tailcfg.ServiceName("svc:" + hostname) - existingTSSvc, err := tsClient.GetVIPService(ctx, serviceName) - if err != nil && !isErrorTailscaleServiceNotFound(err) { + existingTSSvc, err := tsClient.VIPServices().Get(ctx, serviceName.String()) + if err != nil && !tailscale.IsNotFound(err) { return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) } @@ -341,8 +341,8 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin tsSvcPorts = append(tsSvcPorts, "tcp:80") } - tsSvc := &tailscale.VIPService{ - Name: serviceName, + tsSvc := tailscale.VIPService{ + Name: serviceName.String(), Tags: tags, Ports: tsSvcPorts, Comment: managedTSServiceComment, @@ -357,9 +357,9 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin if existingTSSvc == nil || !reflect.DeepEqual(tsSvc.Tags, existingTSSvc.Tags) || !reflect.DeepEqual(tsSvc.Ports, existingTSSvc.Ports) || - !ownersAreSetAndEqual(tsSvc, existingTSSvc) { + !ownersAreSetAndEqual(tsSvc, *existingTSSvc) { logger.Infof("Ensuring Tailscale Service exists and is up to date") - if err := tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { + if err := tsClient.VIPServices().CreateOrUpdate(ctx, tsSvc); err != nil { return false, fmt.Errorf("error creating Tailscale Service: %w", err) } } @@ -375,7 +375,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin } // 6. Update Ingress status if ProxyGroup Pods are ready. - count, err := numberPodsAdvertising(ctx, r.Client, r.tsNamespace, pg.Name, serviceName) + count, err := numberPodsAdvertising(ctx, r.Client, r.tsNamespace, pg.Name, serviceName.String()) if err != nil { return false, fmt.Errorf("failed to check if any Pods are configured: %w", err) } @@ -440,7 +440,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin // operator instances, else the owner reference is cleaned up. 
Returns true if // the operation resulted in an existing Tailscale Service updates (owner // reference removal). -func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, logger *zap.SugaredLogger, tsClient tsClient, pg *tsapi.ProxyGroup) (svcsChanged bool, err error) { +func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, logger *zap.SugaredLogger, tsClient tsclient.Client, pg *tsapi.ProxyGroup) (svcsChanged bool, err error) { // Get serve config for the ProxyGroup cm, cfg, err := r.proxyGroupServeConfig(ctx, pg.Name) if err != nil { @@ -470,11 +470,11 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, logger if !found { logger.Infof("Tailscale Service %q is not owned by any Ingress, cleaning up", tsSvcName) - tsService, err := tsClient.GetVIPService(ctx, tsSvcName) - if isErrorTailscaleServiceNotFound(err) { + tsService, err := tsClient.VIPServices().Get(ctx, tsSvcName.String()) + switch { + case tailscale.IsNotFound(err): return false, nil - } - if err != nil { + case err != nil: return false, fmt.Errorf("getting Tailscale Service %q: %w", tsSvcName, err) } @@ -519,17 +519,19 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, logger // Ingress is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the Tailscale Service is only // deleted if it does not contain any other owner references. If it does the cleanup only removes the owner reference // corresponding to this Ingress. 
-func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger, tsClient tsClient, pg *tsapi.ProxyGroup) (svcChanged bool, err error) { +func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger, tsClient tsclient.Client, pg *tsapi.ProxyGroup) (svcChanged bool, err error) { logger.Debugf("Ensuring any resources for Ingress are cleaned up") ix := slices.Index(ing.Finalizers, FinalizerNamePG) if ix < 0 { logger.Debugf("no finalizer, nothing to do") return false, nil } + logger.Infof("Ensuring that Tailscale Service %q configuration is cleaned up", hostname) serviceName := tailcfg.ServiceName("svc:" + hostname) - svc, err := tsClient.GetVIPService(ctx, serviceName) - if err != nil && !isErrorTailscaleServiceNotFound(err) { + + svc, err := tsClient.VIPServices().Get(ctx, serviceName.String()) + if err != nil && !tailscale.IsNotFound(err) { return false, fmt.Errorf("error getting Tailscale Service: %w", err) } @@ -698,10 +700,7 @@ func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networki // If a Tailscale Service is found, but contains other owner references, only removes this operator's owner reference. // If a Tailscale Service by the given name is not found or does not contain this operator's owner reference, do nothing. // It returns true if an existing Tailscale Service was updated to remove owner reference, as well as any error that occurred. 
-func (r *HAIngressReconciler) cleanupTailscaleService(ctx context.Context, svc *tailscale.VIPService, logger *zap.SugaredLogger, tsClient tsClient) (updated bool, _ error) { - if svc == nil { - return false, nil - } +func (r *HAIngressReconciler) cleanupTailscaleService(ctx context.Context, svc *tailscale.VIPService, logger *zap.SugaredLogger, tsClient tsclient.Client) (updated bool, _ error) { o, err := parseOwnerAnnotation(svc) if err != nil { return false, fmt.Errorf("error parsing Tailscale Service's owner annotation") @@ -721,7 +720,7 @@ func (r *HAIngressReconciler) cleanupTailscaleService(ctx context.Context, svc * } if len(o.OwnerRefs) == 1 { logger.Infof("Deleting Tailscale Service %q", svc.Name) - if err = tsClient.DeleteVIPService(ctx, svc.Name); err != nil && !isErrorTailscaleServiceNotFound(err) { + if err = tsClient.VIPServices().Delete(ctx, svc.Name); err != nil && !tailscale.IsNotFound(err) { return false, err } @@ -735,7 +734,7 @@ func (r *HAIngressReconciler) cleanupTailscaleService(ctx context.Context, svc * return false, fmt.Errorf("error marshalling updated Tailscale Service owner reference: %w", err) } svc.Annotations[ownerAnnotation] = string(json) - return true, tsClient.CreateOrUpdateVIPService(ctx, svc) + return true, tsClient.VIPServices().CreateOrUpdate(ctx, *svc) } // isHTTPEndpointEnabled returns true if the Ingress has been configured to expose an HTTP endpoint to tailnet. @@ -819,7 +818,7 @@ func (r *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con return nil } -func numberPodsAdvertising(ctx context.Context, cl client.Client, tsNamespace, pgName string, serviceName tailcfg.ServiceName) (int, error) { +func numberPodsAdvertising(ctx context.Context, cl client.Client, tsNamespace, pgName string, serviceName string) (int, error) { // Get all state Secrets for this ProxyGroup. 
secrets := &corev1.SecretList{} if err := cl.List(ctx, secrets, client.InNamespace(tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeState))); err != nil { @@ -835,7 +834,7 @@ func numberPodsAdvertising(ctx context.Context, cl client.Client, tsNamespace, p if !ok { continue } - if slices.Contains(prefs.AdvertiseServices, serviceName.String()) { + if slices.Contains(prefs.AdvertiseServices, serviceName) { count++ } } @@ -912,6 +911,10 @@ func ownerAnnotations(operatorID string, svc *tailscale.VIPService) (map[string] // parseOwnerAnnotation returns nil if no valid owner found. func parseOwnerAnnotation(tsSvc *tailscale.VIPService) (*ownerAnnotationValue, error) { + if tsSvc == nil { + return nil, nil + } + if tsSvc.Annotations == nil || tsSvc.Annotations[ownerAnnotation] == "" { return nil, nil } @@ -922,9 +925,8 @@ func parseOwnerAnnotation(tsSvc *tailscale.VIPService) (*ownerAnnotationValue, e return o, nil } -func ownersAreSetAndEqual(a, b *tailscale.VIPService) bool { - return a != nil && b != nil && - a.Annotations != nil && b.Annotations != nil && +func ownersAreSetAndEqual(a, b tailscale.VIPService) bool { + return a.Annotations != nil && b.Annotations != nil && a.Annotations[ownerAnnotation] != "" && b.Annotations[ownerAnnotation] != "" && strings.EqualFold(a.Annotations[ownerAnnotation], b.Annotations[ownerAnnotation]) @@ -1107,11 +1109,6 @@ func hasCerts(ctx context.Context, cl client.Client, ns string, svc tailcfg.Serv return len(cert) > 0 && len(key) > 0, nil } -func isErrorTailscaleServiceNotFound(err error) bool { - errResp, ok := errors.AsType[tailscale.ErrResponse](err) - return ok && errResp.Status == http.StatusNotFound -} - func tagViolations(obj client.Object) []string { var violations []string if obj == nil { diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 33e27ef37..8312dc5f7 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ 
b/cmd/k8s-operator/ingress-for-pg_test.go @@ -25,11 +25,12 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "tailscale.com/client/tailscale/v2" - "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" ) @@ -88,7 +89,7 @@ func TestIngressPGReconciler(t *testing.T) { expectReconciled(t, ingPGR, "default", "test-ingress") // Verify Tailscale Service uses custom tags - tsSvc, err := ft.GetVIPService(t.Context(), "svc:my-svc") + tsSvc, err := ft.VIPServices().Get(t.Context(), "svc:my-svc") if err != nil { t.Fatalf("getting Tailscale Service: %v", err) } @@ -259,7 +260,7 @@ func TestIngressPGReconciler(t *testing.T) { expectReconciled(t, ingPGR, ing3.Namespace, ing3.Name) // Delete the service from "control" - ft.vipServices = make(map[tailcfg.ServiceName]*tailscale.VIPService) + ft.vipServices = make(map[string]tailscale.VIPService) // Delete the ingress and confirm we don't get stuck due to the VIP service not existing. 
if err = fc.Delete(t.Context(), ing3); err != nil { @@ -319,11 +320,11 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { verifyTailscaleService(t, ft, "svc:updated-svc", []string{"tcp:443"}) verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:updated-svc"}) - _, err := ft.GetVIPService(context.Background(), "svc:my-svc") + _, err := ft.VIPServices().Get(context.Background(), "svc:my-svc") if err == nil { t.Fatalf("svc:my-svc not cleaned up") } - if !isErrorTailscaleServiceNotFound(err) { + if !tailscale.IsNotFound(err) { t.Fatalf("unexpected error: %v", err) } } @@ -877,20 +878,18 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) { mustCreate(t, fc, ing) // Simulate existing Tailscale Service from another cluster - existingVIPSvc := &tailscale.VIPService{ + existingVIPSvc := tailscale.VIPService{ Name: "svc:my-svc", Annotations: map[string]string{ ownerAnnotation: `{"ownerrefs":[{"operatorID":"operator-2"}]}`, }, } - ft.vipServices = map[tailcfg.ServiceName]*tailscale.VIPService{ - "svc:my-svc": existingVIPSvc, - } + ft.VIPServices().CreateOrUpdate(t.Context(), existingVIPSvc) // Verify reconciliation adds our operator reference expectReconciled(t, ingPGR, "default", "test-ingress") - tsSvc, err := ft.GetVIPService(context.Background(), "svc:my-svc") + tsSvc, err := ft.VIPServices().Get(context.Background(), "svc:my-svc") if err != nil { t.Fatalf("getting Tailscale Service: %v", err) } @@ -917,7 +916,7 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) { } expectRequeue(t, ingPGR, "default", "test-ingress") - tsSvc, err = ft.GetVIPService(context.Background(), "svc:my-svc") + tsSvc, err = ft.VIPServices().Get(context.Background(), "svc:my-svc") if err != nil { t.Fatalf("getting Tailscale Service after deletion: %v", err) } @@ -1024,7 +1023,7 @@ func populateTLSSecret(t *testing.T, c client.Client, pgName, domain string) { func verifyTailscaleService(t *testing.T, ft *fakeTSClient, serviceName string, wantPorts []string) { 
t.Helper() - tsSvc, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName(serviceName)) + tsSvc, err := ft.VIPServices().Get(context.Background(), serviceName) if err != nil { t.Fatalf("getting Tailscale Service %q: %v", serviceName, err) } @@ -1203,7 +1202,9 @@ func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeT fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} - ft := &fakeTSClient{} + ft := &fakeTSClient{ + vipServices: make(map[string]tailscale.VIPService), + } zl, err := zap.NewDevelopment() if err != nil { t.Fatal(err) @@ -1211,7 +1212,7 @@ func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeT ingPGR := &HAIngressReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, tsNamespace: "operator-ns", tsnetServer: fakeTsnetServer, diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index 138119306..c2a1198cc 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -21,8 +21,11 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "tailscale.com/client/tailscale/v2" + "tailscale.com/ipn" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/kube/kubetypes" "tailscale.com/tstest" "tailscale.com/util/mak" @@ -30,7 +33,9 @@ import ( func TestTailscaleIngress(t *testing.T) { fc := fake.NewFakeClient(ingressClass()) - ft := &fakeTSClient{} + ft := &fakeTSClient{ + vipServices: make(map[string]tailscale.VIPService), + } fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} zl, err := zap.NewDevelopment() if err != nil { @@ -41,7 +46,7 @@ func TestTailscaleIngress(t *testing.T) { ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), tsnetServer: 
fakeTsnetServer, defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", @@ -130,7 +135,7 @@ func TestTailscaleIngressHostname(t *testing.T) { ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), tsnetServer: fakeTsnetServer, defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", @@ -269,7 +274,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), tsnetServer: fakeTsnetServer, defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", @@ -378,7 +383,7 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), tsnetServer: fakeTsnetServer, defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", @@ -530,7 +535,7 @@ func TestIngressProxyClassAnnotation(t *testing.T) { ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: &fakeTSClient{}, + clients: tsclient.NewProvider(&fakeTSClient{}), tsnetServer: &fakeTSNetServer{certDomains: []string{"test-host"}}, defaultTags: []string{"tag:test"}, operatorNamespace: "operator-ns", @@ -601,7 +606,7 @@ func TestIngressLetsEncryptStaging(t *testing.T) { ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: &fakeTSClient{}, + clients: tsclient.NewProvider(&fakeTSClient{}), tsnetServer: &fakeTSNetServer{certDomains: []string{"test-host"}}, defaultTags: []string{"tag:test"}, operatorNamespace: "operator-ns", @@ -710,7 +715,7 @@ func TestEmptyPath(t *testing.T) { ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), tsnetServer: fakeTsnetServer, defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", @@ -853,7 +858,7 
@@ func TestTailscaleIngressWithHTTPRedirect(t *testing.T) { ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), tsnetServer: fakeTsnetServer, defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index d353c5333..2e71fc845 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -46,9 +46,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager/signals" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale/v2" "tailscale.com/client/local" - "tailscale.com/client/tailscale" "tailscale.com/envknob" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -57,6 +57,7 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/k8s-operator/reconciler/proxygrouppolicy" "tailscale.com/k8s-operator/reconciler/tailnet" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/kube/kubetypes" "tailscale.com/tsnet" "tailscale.com/tstime" @@ -84,10 +85,6 @@ const ( ) func main() { - // Required to use our client API. We're fine with the instability since the - // client lives in the same repo as this code. - tailscale.I_Acknowledge_This_API_Is_Unstable = true - var ( tsNamespace = defaultEnv("OPERATOR_NAMESPACE", "") tslogging = defaultEnv("OPERATOR_LOGGING", "info") @@ -155,7 +152,7 @@ func main() { })) } - rOpts := reconcilerOpts{ + runReconcilers(reconcilerOpts{ log: zlog, tsServer: s, tsClient: tsc, @@ -170,15 +167,14 @@ func main() { defaultProxyClass: defaultProxyClass, loginServer: loginServer, ingressClassName: ingressClassName, - } - runReconcilers(rOpts) + }) } // initTSNet initializes the tsnet.Server and logs in to Tailscale. If CLIENT_ID // is set, it authenticates to the Tailscale API using the federated OIDC workload // identity flow. 
Otherwise, it uses the CLIENT_ID_FILE and CLIENT_SECRET_FILE // environment variables to authenticate with static credentials. -func initTSNet(zlog *zap.SugaredLogger, loginServer string) (*tsnet.Server, tsClient) { +func initTSNet(zlog *zap.SugaredLogger, loginServer string) (*tsnet.Server, *tailscale.Client) { var ( clientID = defaultEnv("CLIENT_ID", "") // Used for workload identity federation. clientIDPath = defaultEnv("CLIENT_ID_FILE", "") // Used for static client credentials. @@ -187,19 +183,23 @@ func initTSNet(zlog *zap.SugaredLogger, loginServer string) (*tsnet.Server, tsCl kubeSecret = defaultEnv("OPERATOR_SECRET", "") operatorTags = defaultEnv("OPERATOR_INITIAL_TAGS", "tag:k8s-operator") ) + startlog := zlog.Named("startup") if clientID == "" && (clientIDPath == "" || clientSecretPath == "") { startlog.Fatalf("CLIENT_ID_FILE and CLIENT_SECRET_FILE must be set") // TODO(tomhjp): error message can mention WIF once it's publicly available. } + tsc, err := newTSClient(zlog.Named("ts-api-client"), clientID, clientIDPath, clientSecretPath, loginServer) if err != nil { startlog.Fatalf("error creating Tailscale client: %v", err) } + s := &tsnet.Server{ Hostname: hostname, Logf: zlog.Named("tailscaled").Debugf, ControlURL: loginServer, } + if p := os.Getenv("TS_PORT"); p != "" { port, err := strconv.ParseUint(p, 10, 16) if err != nil { @@ -207,6 +207,7 @@ func initTSNet(zlog *zap.SugaredLogger, loginServer string) (*tsnet.Server, tsCl } s.Port = uint16(port) } + if kubeSecret != "" { st, err := kubestore.New(logger.Discard, kubeSecret) if err != nil { @@ -214,6 +215,7 @@ func initTSNet(zlog *zap.SugaredLogger, loginServer string) (*tsnet.Server, tsCl } s.Store = st } + if err := s.Start(); err != nil { startlog.Fatalf("starting tailscale server: %v", err) } @@ -239,27 +241,29 @@ waitOnline: if loginDone { break } - caps := tailscale.KeyCapabilities{ - Devices: tailscale.KeyDeviceCapabilities{ - Create: tailscale.KeyDeviceCreateCapabilities{ - Reusable: false, - 
Preauthorized: true, - Tags: strings.Split(operatorTags, ","), - }, - }, - } - authkey, _, err := tsc.CreateKey(ctx, caps) + + var caps tailscale.KeyCapabilities + caps.Devices.Create.Reusable = false + caps.Devices.Create.Preauthorized = true + caps.Devices.Create.Tags = strings.Split(operatorTags, ",") + + authKey, err := tsc.Keys().CreateAuthKey(ctx, tailscale.CreateKeyRequest{Capabilities: caps}) if err != nil { startlog.Fatalf("creating operator authkey: %v", err) } - if err := lc.Start(ctx, ipn.Options{ - AuthKey: authkey, - }); err != nil { + + opts := ipn.Options{ + AuthKey: authKey.Key, + } + + if err = lc.Start(ctx, opts); err != nil { startlog.Fatalf("starting tailscale: %v", err) } - if err := lc.StartLoginInteractive(ctx); err != nil { + + if err = lc.StartLoginInteractive(ctx); err != nil { startlog.Fatalf("starting login: %v", err) } + startlog.Debugf("requested login by authkey") loginDone = true case "NeedsMachineAuth": @@ -286,6 +290,12 @@ func serviceManagedResourceFilterPredicate() predicate.Predicate { }) } +type ( + ClientProvider interface { + For(tailnet string) (tsclient.Client, error) + } +) + // runReconcilers starts the controller-runtime manager and registers the // ServiceReconciler. It blocks forever. 
func runReconcilers(opts reconcilerOpts) { @@ -334,11 +344,14 @@ func runReconcilers(opts reconcilerOpts) { startlog.Fatalf("could not create manager: %v", err) } + clients := tsclient.NewProvider(tsclient.Wrap(opts.tsClient)) + tailnetOptions := tailnet.ReconcilerOptions{ Client: mgr.GetClient(), TailscaleNamespace: opts.tailscaleNamespace, Clock: tstime.DefaultClock{}, Logger: opts.log, + Registry: clients, } if err = tailnet.NewReconciler(tailnetOptions).Register(mgr); err != nil { @@ -368,7 +381,7 @@ func runReconcilers(opts reconcilerOpts) { ssr := &tailscaleSTSReconciler{ Client: mgr.GetClient(), tsnetServer: opts.tsServer, - tsClient: opts.tsClient, + clients: clients, defaultTags: strings.Split(opts.proxyTags, ","), operatorNamespace: opts.tailscaleNamespace, proxyImage: opts.proxyImage, @@ -460,7 +473,7 @@ func runReconcilers(opts reconcilerOpts) { Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). Complete(&HAIngressReconciler{ recorder: eventRecorder, - tsClient: opts.tsClient, + clients: clients, tsnetServer: opts.tsServer, defaultTags: strings.Split(opts.proxyTags, ","), Client: mgr.GetClient(), @@ -486,7 +499,7 @@ func runReconcilers(opts reconcilerOpts) { Watches(&discoveryv1.EndpointSlice{}, ingressSvcFromEpsFilter). 
Complete(&HAServiceReconciler{ recorder: eventRecorder, - tsClient: opts.tsClient, + clients: clients, defaultTags: strings.Split(opts.proxyTags, ","), Client: mgr.GetClient(), logger: opts.log.Named("service-pg-reconciler"), @@ -684,7 +697,7 @@ func runReconcilers(opts reconcilerOpts) { Client: mgr.GetClient(), log: opts.log.Named("recorder-reconciler"), clock: tstime.DefaultClock{}, - tsClient: opts.tsClient, + clients: clients, loginServer: opts.loginServer, }) if err != nil { @@ -706,7 +719,7 @@ func runReconcilers(opts reconcilerOpts) { Client: mgr.GetClient(), recorder: eventRecorder, logger: opts.log.Named("kube-apiserver-ts-service-reconciler"), - tsClient: opts.tsClient, + clients: clients, tsNamespace: opts.tailscaleNamespace, defaultTags: strings.Split(opts.proxyTags, ","), operatorID: id, @@ -738,7 +751,7 @@ func runReconcilers(opts reconcilerOpts) { Client: mgr.GetClient(), log: opts.log.Named("proxygroup-reconciler"), clock: tstime.DefaultClock{}, - tsClient: opts.tsClient, + clients: clients, tsNamespace: opts.tailscaleNamespace, tsProxyImage: opts.proxyImage, @@ -763,7 +776,7 @@ func runReconcilers(opts reconcilerOpts) { type reconcilerOpts struct { log *zap.SugaredLogger tsServer *tsnet.Server - tsClient tsClient + tsClient *tailscale.Client tailscaleNamespace string // namespace in which operator resources will be deployed restConfig *rest.Config // config for connecting to the kube API server proxyImage string // : diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 305b1738c..3ee6deea7 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -24,8 +24,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/k8s-operator/apis/v1alpha1" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/kube/kubetypes" 
"tailscale.com/net/dns/resolvconffile" "tailscale.com/tstest" @@ -43,7 +45,7 @@ func TestLoadBalancerClass(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -62,7 +64,7 @@ func TestLoadBalancerClass(t *testing.T) { // The apiserver is supposed to set the UID, but the fake client // doesn't. So, set it explicitly because other code later depends // on it being set. - UID: types.UID("1234-UID"), + UID: "1234-UID", Annotations: map[string]string{ AnnotationTailnetTargetFQDN: "invalid.example.com", }, @@ -203,7 +205,7 @@ func TestLoadBalancerClass(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", - UID: types.UID("1234-UID"), + UID: "1234-UID", }, Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", @@ -223,7 +225,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -241,7 +243,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { // The apiserver is supposed to set the UID, but the fake client // doesn't. So, set it explicitly because other code later depends // on it being set. - UID: types.UID("1234-UID"), + UID: "1234-UID", Annotations: map[string]string{ AnnotationTailnetTargetFQDN: tailnetTargetFQDN, }, @@ -333,7 +335,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -351,7 +353,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { // The apiserver is supposed to set the UID, but the fake client // doesn't. 
So, set it explicitly because other code later depends // on it being set. - UID: types.UID("1234-UID"), + UID: "1234-UID", Annotations: map[string]string{ AnnotationTailnetTargetIP: tailnetTargetIP, }, @@ -442,7 +444,7 @@ func TestTailnetTargetIPAnnotation_IPCouldNotBeParsed(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -457,7 +459,7 @@ func TestTailnetTargetIPAnnotation_IPCouldNotBeParsed(t *testing.T) { Name: "test", Namespace: "default", - UID: types.UID("1234-UID"), + UID: "1234-UID", Annotations: map[string]string{ AnnotationTailnetTargetIP: tailnetTargetIP, }, @@ -510,7 +512,7 @@ func TestTailnetTargetIPAnnotation_InvalidIP(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -525,7 +527,7 @@ func TestTailnetTargetIPAnnotation_InvalidIP(t *testing.T) { Name: "test", Namespace: "default", - UID: types.UID("1234-UID"), + UID: "1234-UID", Annotations: map[string]string{ AnnotationTailnetTargetIP: tailnetTargetIP, }, @@ -578,7 +580,7 @@ func TestAnnotations(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -596,7 +598,7 @@ func TestAnnotations(t *testing.T) { // The apiserver is supposed to set the UID, but the fake client // doesn't. So, set it explicitly because other code later depends // on it being set. 
- UID: types.UID("1234-UID"), + UID: "1234-UID", Annotations: map[string]string{ "tailscale.com/expose": "true", }, @@ -663,7 +665,7 @@ func TestAnnotations(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", - UID: types.UID("1234-UID"), + UID: "1234-UID", }, Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", @@ -682,7 +684,7 @@ func TestAnnotationIntoLB(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -700,7 +702,7 @@ func TestAnnotationIntoLB(t *testing.T) { // The apiserver is supposed to set the UID, but the fake client // doesn't. So, set it explicitly because other code later depends // on it being set. - UID: types.UID("1234-UID"), + UID: "1234-UID", Annotations: map[string]string{ "tailscale.com/expose": "true", }, @@ -779,7 +781,7 @@ func TestAnnotationIntoLB(t *testing.T) { Name: "test", Namespace: "default", Finalizers: []string{"tailscale.com/finalizer"}, - UID: types.UID("1234-UID"), + UID: "1234-UID", }, Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", @@ -812,7 +814,7 @@ func TestLBIntoAnnotation(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -830,7 +832,7 @@ func TestLBIntoAnnotation(t *testing.T) { // The apiserver is supposed to set the UID, but the fake client // doesn't. So, set it explicitly because other code later depends // on it being set. 
- UID: types.UID("1234-UID"), + UID: "1234-UID", }, Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", @@ -925,7 +927,7 @@ func TestLBIntoAnnotation(t *testing.T) { Annotations: map[string]string{ "tailscale.com/expose": "true", }, - UID: types.UID("1234-UID"), + UID: "1234-UID", }, Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", @@ -947,7 +949,7 @@ func TestCustomHostname(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -965,7 +967,7 @@ func TestCustomHostname(t *testing.T) { // The apiserver is supposed to set the UID, but the fake client // doesn't. So, set it explicitly because other code later depends // on it being set. - UID: types.UID("1234-UID"), + UID: "1234-UID", Annotations: map[string]string{ "tailscale.com/expose": "true", "tailscale.com/hostname": "reindeer-flotilla", @@ -1034,7 +1036,7 @@ func TestCustomHostname(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", - UID: types.UID("1234-UID"), + UID: "1234-UID", Annotations: map[string]string{ "tailscale.com/hostname": "reindeer-flotilla", }, @@ -1056,7 +1058,7 @@ func TestCustomPriorityClassName(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -1075,7 +1077,7 @@ func TestCustomPriorityClassName(t *testing.T) { // The apiserver is supposed to set the UID, but the fake client // doesn't. So, set it explicitly because other code later depends // on it being set. 
- UID: types.UID("1234-UID"), + UID: "1234-UID", Annotations: map[string]string{ "tailscale.com/expose": "true", "tailscale.com/hostname": "tailscale-critical", @@ -1212,7 +1214,7 @@ func TestServiceProxyClassAnnotation(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -1308,7 +1310,7 @@ func TestProxyClassForService(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -1326,7 +1328,7 @@ func TestProxyClassForService(t *testing.T) { // The apiserver is supposed to set the UID, but the fake client // doesn't. So, set it explicitly because other code later depends // on it being set. - UID: types.UID("1234-UID"), + UID: "1234-UID", }, Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", @@ -1397,7 +1399,7 @@ func TestDefaultLoadBalancer(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -1416,7 +1418,7 @@ func TestDefaultLoadBalancer(t *testing.T) { // The apiserver is supposed to set the UID, but the fake client // doesn't. So, set it explicitly because other code later depends // on it being set. 
- UID: types.UID("1234-UID"), + UID: "1234-UID", }, Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", @@ -1451,7 +1453,7 @@ func TestProxyFirewallMode(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -1471,7 +1473,7 @@ func TestProxyFirewallMode(t *testing.T) { // The apiserver is supposed to set the UID, but the fake client // doesn't. So, set it explicitly because other code later depends // on it being set. - UID: types.UID("1234-UID"), + UID: "1234-UID", }, Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", @@ -1541,7 +1543,7 @@ func Test_HeadlessService(t *testing.T) { Name: "test", Namespace: "default", - UID: types.UID("1234-UID"), + UID: "1234-UID", Annotations: map[string]string{ AnnotationExpose: "true", }, @@ -1825,7 +1827,7 @@ func Test_authKeyRemoval(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -1838,7 +1840,7 @@ func Test_authKeyRemoval(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", - UID: types.UID("1234-UID"), + UID: "1234-UID", }, Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", @@ -1890,7 +1892,7 @@ func Test_externalNameService(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -1908,7 +1910,7 @@ func Test_externalNameService(t *testing.T) { // The apiserver is supposed to set the UID, but the fake client // doesn't. So, set it explicitly because other code later depends // on it being set. 
- UID: types.UID("1234-UID"), + UID: "1234-UID", Annotations: map[string]string{ AnnotationExpose: "true", }, @@ -1984,7 +1986,7 @@ func Test_metricsResourceCreation(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), operatorNamespace: "operator-ns", }, logger: zl.Sugar(), @@ -2055,7 +2057,7 @@ func TestIgnorePGService(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", @@ -2073,7 +2075,7 @@ func TestIgnorePGService(t *testing.T) { // The apiserver is supposed to set the UID, but the fake client // doesn't. So, set it explicitly because other code later depends // on it being set. - UID: types.UID("1234-UID"), + UID: "1234-UID", Annotations: map[string]string{ "tailscale.com/proxygroup": "test-pg", }, diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 4d5a795d7..4bd015701 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -10,7 +10,6 @@ import ( "encoding/json" "errors" "fmt" - "net/http" "net/netip" "slices" "sort" @@ -33,11 +32,12 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale/v2" - "tailscale.com/client/tailscale" "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/kube/egressservices" "tailscale.com/kube/k8s-proxy/conf" "tailscale.com/kube/kubetypes" @@ -85,7 +85,7 @@ type ProxyGroupReconciler struct { log *zap.SugaredLogger recorder record.EventRecorder clock tstime.Clock - tsClient tsClient + clients ClientProvider // User-specified defaults from the helm installation. 
tsNamespace string @@ -122,7 +122,7 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ return reconcile.Result{}, fmt.Errorf("failed to get tailscale.com ProxyGroup: %w", err) } - tailscaleClient, loginUrl, err := r.getClientAndLoginURL(ctx, pg.Spec.Tailnet) + tsClient, err := r.clients.For(pg.Spec.Tailnet) if err != nil { oldPGStatus := pg.Status.DeepCopy() nrr := ¬ReadyReason{ @@ -141,7 +141,7 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ return reconcile.Result{}, nil } - if done, err := r.maybeCleanup(ctx, tailscaleClient, pg); err != nil { + if done, err := r.maybeCleanup(ctx, tsClient, pg); err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { logger.Infof("optimistic lock error, retrying: %s", err) return reconcile.Result{}, nil @@ -160,7 +160,7 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } oldPGStatus := pg.Status.DeepCopy() - staticEndpoints, nrr, err := r.reconcilePG(ctx, tailscaleClient, loginUrl, pg, logger) + staticEndpoints, nrr, err := r.reconcilePG(ctx, tsClient, pg, logger) return reconcile.Result{}, errors.Join(err, r.maybeUpdateStatus(ctx, logger, pg, oldPGStatus, nrr, staticEndpoints)) } @@ -168,7 +168,7 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ // for deletion. It is separated out from Reconcile to make a clear separation // between reconciling the ProxyGroup, and posting the status of its created // resources onto the ProxyGroup status field. 
-func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, tailscaleClient tsClient, loginUrl string, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (map[string][]netip.AddrPort, *notReadyReason, error) { +func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, tsClient tsclient.Client, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (map[string][]netip.AddrPort, *notReadyReason, error) { if !slices.Contains(pg.Finalizers, FinalizerName) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. So, @@ -209,7 +209,7 @@ func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, tailscaleClient return notReady(reasonProxyGroupInvalid, fmt.Sprintf("invalid ProxyGroup spec: %v", err)) } - staticEndpoints, nrr, err := r.maybeProvision(ctx, tailscaleClient, loginUrl, pg, proxyClass) + staticEndpoints, nrr, err := r.maybeProvision(ctx, tsClient, pg, proxyClass) if err != nil { return nil, nrr, err } @@ -295,7 +295,7 @@ func (r *ProxyGroupReconciler) validate(ctx context.Context, pg *tsapi.ProxyGrou return errors.Join(errs...) 
} -func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, tailscaleClient tsClient, loginUrl string, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (map[string][]netip.AddrPort, *notReadyReason, error) { +func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, tsClient tsclient.Client, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (map[string][]netip.AddrPort, *notReadyReason, error) { logger := r.logger(pg.Name) r.mu.Lock() r.ensureStateAddedForProxyGroup(pg) @@ -317,7 +317,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, tailscaleClie } } - staticEndpoints, err := r.ensureConfigSecretsCreated(ctx, tailscaleClient, loginUrl, pg, proxyClass, svcToNodePorts) + staticEndpoints, err := r.ensureConfigSecretsCreated(ctx, tsClient, pg, proxyClass, svcToNodePorts) if err != nil { if _, ok := errors.AsType[*FindStaticEndpointErr](err); ok { reason := reasonProxyGroupCreationFailed @@ -428,7 +428,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, tailscaleClie return r.notReadyErrf(pg, logger, "error reconciling metrics resources: %w", err) } - if err := r.cleanupDanglingResources(ctx, tailscaleClient, pg, proxyClass); err != nil { + if err := r.cleanupDanglingResources(ctx, tsClient, pg, proxyClass); err != nil { return r.notReadyErrf(pg, logger, "error cleaning up dangling resources: %w", err) } @@ -625,7 +625,7 @@ func (r *ProxyGroupReconciler) ensureNodePortServiceCreated(ctx context.Context, // cleanupDanglingResources ensures we don't leak config secrets, state secrets, and // tailnet devices when the number of replicas specified is reduced. 
-func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) error { +func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, tsClient tsclient.Client, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) error { logger := r.logger(pg.Name) metadata, err := getNodeMetadata(ctx, pg, r.Client, r.tsNamespace) if err != nil { @@ -639,7 +639,7 @@ func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, tai // Dangling resource, delete the config + state Secrets, as well as // deleting the device from the tailnet. - if err := r.ensureDeviceDeleted(ctx, tailscaleClient, m.tsID, logger); err != nil { + if err := r.ensureDeviceDeleted(ctx, tsClient, m.tsID, logger); err != nil { return err } if err := r.Delete(ctx, m.stateSecret); err != nil && !apierrors.IsNotFound(err) { @@ -682,7 +682,7 @@ func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, tai // maybeCleanup just deletes the device from the tailnet. All the kubernetes // resources linked to a ProxyGroup will get cleaned up via owner references // (which we can use because they are all in the same namespace). 
-func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup) (bool, error) { +func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, tsClient tsclient.Client, pg *tsapi.ProxyGroup) (bool, error) { logger := r.logger(pg.Name) metadata, err := getNodeMetadata(ctx, pg, r.Client, r.tsNamespace) @@ -691,7 +691,7 @@ func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, tailscaleClient } for _, m := range metadata { - if err := r.ensureDeviceDeleted(ctx, tailscaleClient, m.tsID, logger); err != nil { + if err := r.ensureDeviceDeleted(ctx, tsClient, m.tsID, logger); err != nil { return false, err } } @@ -712,25 +712,23 @@ func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, tailscaleClient return true, nil } -func (r *ProxyGroupReconciler) ensureDeviceDeleted(ctx context.Context, tailscaleClient tsClient, id tailcfg.StableNodeID, logger *zap.SugaredLogger) error { +func (r *ProxyGroupReconciler) ensureDeviceDeleted(ctx context.Context, tsClient tsclient.Client, id tailcfg.StableNodeID, logger *zap.SugaredLogger) error { logger.Debugf("deleting device %s from control", string(id)) - if err := tailscaleClient.DeleteDevice(ctx, string(id)); err != nil { - if errResp, ok := errors.AsType[tailscale.ErrResponse](err); ok && errResp.Status == http.StatusNotFound { - logger.Debugf("device %s not found, likely because it has already been deleted from control", string(id)) - } else { - return fmt.Errorf("error deleting device: %w", err) - } - } else { - logger.Debugf("device %s deleted from control", string(id)) + err := tsClient.Devices().Delete(ctx, string(id)) + switch { + case tailscale.IsNotFound(err): + logger.Debugf("device %s not found, likely because it has already been deleted from control", string(id)) + case err != nil: + return fmt.Errorf("error deleting device: %w", err) } + logger.Debugf("device %s deleted from control", string(id)) return nil } func (r *ProxyGroupReconciler) 
ensureConfigSecretsCreated( ctx context.Context, - tailscaleClient tsClient, - loginUrl string, + tsClient tsclient.Client, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass, svcToNodePorts map[string]uint16, @@ -756,7 +754,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated( return nil, err } - authKey, err := r.getAuthKey(ctx, tailscaleClient, pg, existingCfgSecret, i, logger) + authKey, err := r.getAuthKey(ctx, tsClient, pg, existingCfgSecret, i, logger) if err != nil { return nil, err } @@ -838,8 +836,8 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated( } } - if loginUrl != "" { - cfg.ServerURL = new(loginUrl) + if tsClient.LoginURL() != "" { + cfg.ServerURL = new(tsClient.LoginURL()) } if proxyClass != nil && proxyClass.Spec.TailscaleConfig != nil { @@ -867,7 +865,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated( return nil, err } - configs, err := pgTailscaledConfig(pg, loginUrl, proxyClass, i, authKey, endpoints[nodePortSvcName], existingAdvertiseServices) + configs, err := pgTailscaledConfig(pg, tsClient.LoginURL(), proxyClass, i, authKey, endpoints[nodePortSvcName], existingAdvertiseServices) if err != nil { return nil, fmt.Errorf("error creating tailscaled config: %w", err) } @@ -904,7 +902,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated( // A new key is created if the config Secret doesn't exist yet, or if the // proxy has requested a reissue via its state Secret. An existing key is // retained while the device hasn't authed or a reissue is in progress. 
-func (r *ProxyGroupReconciler) getAuthKey(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup, existingCfgSecret *corev1.Secret, ordinal int32, logger *zap.SugaredLogger) (*string, error) { +func (r *ProxyGroupReconciler) getAuthKey(ctx context.Context, tsClient tsclient.Client, pg *tsapi.ProxyGroup, existingCfgSecret *corev1.Secret, ordinal int32, logger *zap.SugaredLogger) (*string, error) { // Get state Secret to check if it's already authed or has requested // a fresh auth key. stateSecret := &corev1.Secret{ @@ -931,7 +929,7 @@ func (r *ProxyGroupReconciler) getAuthKey(ctx context.Context, tailscaleClient t if !createAuthKey { var err error - createAuthKey, err = r.shouldReissueAuthKey(ctx, tailscaleClient, pg, stateSecret, cfgAuthKey) + createAuthKey, err = r.shouldReissueAuthKey(ctx, tsClient, pg, stateSecret, cfgAuthKey) if err != nil { return nil, err } @@ -945,7 +943,7 @@ func (r *ProxyGroupReconciler) getAuthKey(ctx context.Context, tailscaleClient t if len(tags) == 0 { tags = r.defaultTags } - key, err := newAuthKey(ctx, tailscaleClient, tags) + key, err := newAuthKey(ctx, tsClient, tags) if err != nil { return nil, err } @@ -965,7 +963,7 @@ func (r *ProxyGroupReconciler) getAuthKey(ctx context.Context, tailscaleClient t // shouldReissueAuthKey returns true if the proxy needs a new auth key. It // tracks in-flight reissues via authKeyReissuing to avoid duplicate API calls // across reconciles. 
-func (r *ProxyGroupReconciler) shouldReissueAuthKey(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup, stateSecret *corev1.Secret, cfgAuthKey *string) (shouldReissue bool, err error) { +func (r *ProxyGroupReconciler) shouldReissueAuthKey(ctx context.Context, tsClient tsclient.Client, pg *tsapi.ProxyGroup, stateSecret *corev1.Secret, cfgAuthKey *string) (shouldReissue bool, err error) { r.mu.Lock() reissuing := r.authKeyReissuing[stateSecret.Name] r.mu.Unlock() @@ -1017,7 +1015,7 @@ func (r *ProxyGroupReconciler) shouldReissueAuthKey(ctx context.Context, tailsca r.log.Infof("Proxy failing to auth; attempting cleanup and new key") if tsID := stateSecret.Data[kubetypes.KeyDeviceID]; len(tsID) > 0 { id := tailcfg.StableNodeID(tsID) - if err := r.ensureDeviceDeleted(ctx, tailscaleClient, id, r.log); err != nil { + if err = r.ensureDeviceDeleted(ctx, tsClient, id, r.log); err != nil { return false, err } } @@ -1305,29 +1303,6 @@ func (r *ProxyGroupReconciler) getRunningProxies(ctx context.Context, pg *tsapi. return devices, nil } -// getClientAndLoginURL returns the appropriate Tailscale client and resolved login URL -// for the given tailnet name. If no tailnet is specified, returns the default client -// and login server. Applies fallback to the operator's login server if the tailnet -// doesn't specify a custom login URL. 
-func (r *ProxyGroupReconciler) getClientAndLoginURL(ctx context.Context, tailnetName string) (tsClient, - string, error) { - if tailnetName == "" { - return r.tsClient, r.loginServer, nil - } - - tc, loginUrl, err := clientForTailnet(ctx, r.Client, r.tsNamespace, tailnetName) - if err != nil { - return nil, "", err - } - - // Apply fallback if tailnet doesn't specify custom login URL - if loginUrl == "" { - loginUrl = r.loginServer - } - - return tc, loginUrl, nil -} - type nodeMetadata struct { ordinal int32 stateSecret *corev1.Secret diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 1a50ee1f0..95eb7bd5f 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -30,10 +30,12 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - "tailscale.com/client/tailscale" + "tailscale.com/client/tailscale/v2" + "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/kube/k8s-proxy/conf" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" @@ -43,7 +45,6 @@ import ( const ( testProxyImage = "tailscale/tailscale:test" - initialCfgHash = "6632726be70cf224049580deb4d317bba065915b5fd415461d60ed621c91b196" ) var ( @@ -641,7 +642,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { defaultProxyClass: "default-pc", Client: fc, - tsClient: tsClient, + clients: tsclient.NewProvider(tsClient), recorder: fr, clock: cl, authKeyRateLimits: make(map[string]*rate.Limiter), @@ -649,7 +650,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { } for i, r := range tt.reconciles { - createdNodes := []corev1.Node{} + var createdNodes []corev1.Node t.Run(tt.name, func(t *testing.T) { for _, n := range r.nodes { no := &corev1.Node{ @@ -786,7 +787,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { 
defaultProxyClass: "default-pc", Client: fc, - tsClient: tsClient, + clients: tsclient.NewProvider(tsClient), recorder: fr, log: zl.Sugar().With("TestName", tt.name).With("Reconcile", "cleanup"), clock: cl, @@ -849,7 +850,7 @@ func TestProxyGroup(t *testing.T) { defaultProxyClass: "default-pc", Client: fc, - tsClient: tsClient, + clients: tsclient.NewProvider(tsClient), recorder: fr, log: zl.Sugar(), clock: cl, @@ -908,17 +909,13 @@ func TestProxyGroup(t *testing.T) { t.Fatalf("expected %d egress ProxyGroups, got %d", expected, reconciler.egressProxyGroups.Len()) } expectProxyGroupResources(t, fc, pg, true, pc) - keyReq := tailscale.KeyCapabilities{ - Devices: tailscale.KeyDeviceCapabilities{ - Create: tailscale.KeyDeviceCreateCapabilities{ - Reusable: false, - Ephemeral: false, - Preauthorized: true, - Tags: []string{"tag:test-tag"}, - }, - }, - } - if diff := cmp.Diff(tsClient.KeyRequests(), []tailscale.KeyCapabilities{keyReq, keyReq}); diff != "" { + var keyReq tailscale.KeyCapabilities + keyReq.Devices.Create.Reusable = false + keyReq.Devices.Create.Ephemeral = false + keyReq.Devices.Create.Preauthorized = true + keyReq.Devices.Create.Tags = []string{"tag:test-tag"} + + if diff := cmp.Diff(tsClient.keyRequests, []tailscale.KeyCapabilities{keyReq, keyReq}); diff != "" { t.Fatalf("unexpected secrets (-got +want):\n%s", diff) } }) @@ -1059,7 +1056,7 @@ func TestProxyGroupTypes(t *testing.T) { tsProxyImage: testProxyImage, Client: fc, log: zl.Sugar(), - tsClient: &fakeTSClient{}, + clients: tsclient.NewProvider(&fakeTSClient{}), clock: tstest.NewClock(tstest.ClockOpts{}), authKeyRateLimits: make(map[string]*rate.Limiter), authKeyReissuing: make(map[string]bool), @@ -1301,7 +1298,7 @@ func TestKubeAPIServerStatusConditionFlow(t *testing.T) { tsProxyImage: testProxyImage, Client: fc, log: zap.Must(zap.NewDevelopment()).Sugar(), - tsClient: &fakeTSClient{}, + clients: tsclient.NewProvider(&fakeTSClient{}), clock: tstest.NewClock(tstest.ClockOpts{}), authKeyRateLimits: 
make(map[string]*rate.Limiter), authKeyReissuing: make(map[string]bool), @@ -1356,7 +1353,7 @@ func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) { tsProxyImage: testProxyImage, Client: fc, log: zap.Must(zap.NewDevelopment()).Sugar(), - tsClient: &fakeTSClient{}, + clients: tsclient.NewProvider(&fakeTSClient{}), clock: tstest.NewClock(tstest.ClockOpts{}), authKeyRateLimits: make(map[string]*rate.Limiter), authKeyReissuing: make(map[string]bool), @@ -1443,7 +1440,7 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { tsProxyImage: testProxyImage, Client: fc, log: zap.Must(zap.NewDevelopment()).Sugar(), - tsClient: &fakeTSClient{}, + clients: tsclient.NewProvider(&fakeTSClient{}), clock: tstest.NewClock(tstest.ClockOpts{}), authKeyRateLimits: make(map[string]*rate.Limiter), authKeyReissuing: make(map[string]bool), @@ -1713,7 +1710,7 @@ func TestProxyGroupGetAuthKey(t *testing.T) { tsFirewallMode: "auto", Client: fc, - tsClient: tsClient, + clients: tsclient.NewProvider(tsClient), recorder: fr, log: zl.Sugar(), clock: cl, @@ -2109,7 +2106,7 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) { defaultTags: []string{"tag:test"}, defaultProxyClass: tt.defaultProxyClass, Client: fc, - tsClient: &fakeTSClient{}, + clients: tsclient.NewProvider(&fakeTSClient{}), log: zl.Sugar(), clock: cl, authKeyRateLimits: make(map[string]*rate.Limiter), diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 519f81fe0..e460be684 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -12,7 +12,6 @@ import ( "errors" "fmt" "maps" - "net/http" "os" "path" "slices" @@ -30,11 +29,12 @@ import ( "k8s.io/apiserver/pkg/storage/names" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" + "tailscale.com/client/tailscale/v2" - "tailscale.com/client/tailscale" "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/tsclient" 
"tailscale.com/kube/kubetypes" "tailscale.com/net/netutil" "tailscale.com/tailcfg" @@ -174,7 +174,7 @@ type tsnetServer interface { type tailscaleSTSReconciler struct { client.Client tsnetServer tsnetServer - tsClient tsClient + clients ClientProvider defaultTags []string operatorNamespace string proxyImage string @@ -183,9 +183,9 @@ type tailscaleSTSReconciler struct { loginServer string } -func (sts tailscaleSTSReconciler) validate() error { - if sts.tsFirewallMode != "" && !isValidFirewallMode(sts.tsFirewallMode) { - return fmt.Errorf("invalid proxy firewall mode %s, valid modes are iptables, nftables or unset", sts.tsFirewallMode) +func (r *tailscaleSTSReconciler) validate() error { + if r.tsFirewallMode != "" && !isValidFirewallMode(r.tsFirewallMode) { + return fmt.Errorf("invalid proxy firewall mode %s, valid modes are iptables, nftables or unset", r.tsFirewallMode) } return nil } @@ -197,22 +197,17 @@ func IsHTTPSEnabledOnTailnet(tsnetServer tsnetServer) bool { // Provision ensures that the StatefulSet for the given service is running and // up to date. -func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig) (*corev1.Service, error) { - tailscaleClient, loginUrl, err := a.getClientAndLoginURL(ctx, sts.Tailnet) - if err != nil { - return nil, fmt.Errorf("failed to get tailscale client and loginUrl: %w", err) - } - +func (r *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig) (*corev1.Service, error) { // Do full reconcile. 
// TODO (don't create Service for the Connector) - hsvc, err := a.reconcileHeadlessService(ctx, logger, sts) + hsvc, err := r.reconcileHeadlessService(ctx, logger, sts) if err != nil { return nil, fmt.Errorf("failed to reconcile headless service: %w", err) } proxyClass := new(tsapi.ProxyClass) if sts.ProxyClassName != "" { - if err := a.Get(ctx, types.NamespacedName{Name: sts.ProxyClassName}, proxyClass); err != nil { + if err := r.Get(ctx, types.NamespacedName{Name: sts.ProxyClassName}, proxyClass); err != nil { return nil, fmt.Errorf("failed to get ProxyClass: %w", err) } if !tsoperator.ProxyClassIsReady(proxyClass) { @@ -222,12 +217,17 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga } sts.ProxyClass = proxyClass - secretNames, err := a.provisionSecrets(ctx, tailscaleClient, loginUrl, sts, hsvc, logger) + tsClient, err := r.clients.For(sts.Tailnet) + if err != nil { + return nil, fmt.Errorf("failed to get tailscale client: %w", err) + } + + secretNames, err := r.provisionSecrets(ctx, tsClient, sts, hsvc, logger) if err != nil { return nil, fmt.Errorf("failed to create or get API key secret: %w", err) } - _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretNames) + _, err = r.reconcileSTS(ctx, logger, sts, hsvc, secretNames) if err != nil { return nil, fmt.Errorf("failed to reconcile statefulset: %w", err) } @@ -237,48 +237,20 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga proxyLabels: hsvc.Labels, proxyType: sts.proxyType, } - if err = reconcileMetricsResources(ctx, logger, mo, sts.ProxyClass, a.Client); err != nil { + if err = reconcileMetricsResources(ctx, logger, mo, sts.ProxyClass, r.Client); err != nil { return nil, fmt.Errorf("failed to ensure metrics resources: %w", err) } return hsvc, nil } -// getClientAndLoginURL returns the appropriate Tailscale client and resolved login URL -// for the given tailnet name. 
If no tailnet is specified, returns the default client -// and login server. Applies fallback to the operator's login server if the tailnet -// doesn't specify a custom login URL. -func (a *tailscaleSTSReconciler) getClientAndLoginURL(ctx context.Context, tailnetName string) (tsClient, - string, error) { - if tailnetName == "" { - return a.tsClient, a.loginServer, nil - } - - tc, loginUrl, err := clientForTailnet(ctx, a.Client, a.operatorNamespace, tailnetName) - if err != nil { - return nil, "", err - } - - // Apply fallback if tailnet doesn't specify custom login URL - if loginUrl == "" { - loginUrl = a.loginServer - } - - return tc, loginUrl, nil -} - // Cleanup removes all resources associated that were created by Provision with // the given labels. It returns true when all resources have been removed, // otherwise it returns false and the caller should retry later. -func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, tailnet string, logger *zap.SugaredLogger, labels map[string]string, typ string) (done bool, _ error) { - tailscaleClient := a.tsClient - if tailnet != "" { - tc, _, err := clientForTailnet(ctx, a.Client, a.operatorNamespace, tailnet) - if err != nil { - logger.Errorf("failed to get tailscale client: %v", err) - return false, nil - } - - tailscaleClient = tc +func (r *tailscaleSTSReconciler) Cleanup(ctx context.Context, tailnet string, logger *zap.SugaredLogger, labels map[string]string, typ string) (done bool, _ error) { + tsClient, err := r.clients.For(tailnet) + if err != nil { + logger.Errorf("failed to get tailscale client: %v", err) + return false, nil } // Need to delete the StatefulSet first, and delete it with foreground @@ -287,7 +259,7 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, tailnet string, lo // assuming k8s ordering semantics don't mess with us, that should avoid // tailscale device deletion races where we fail to notice a device that // should be removed. 
- sts, err := getSingleObject[appsv1.StatefulSet](ctx, a.Client, a.operatorNamespace, labels) + sts, err := getSingleObject[appsv1.StatefulSet](ctx, r.Client, r.operatorNamespace, labels) if err != nil { return false, fmt.Errorf("getting statefulset: %w", err) } @@ -301,12 +273,12 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, tailnet string, lo } options := []client.DeleteAllOfOption{ - client.InNamespace(a.operatorNamespace), + client.InNamespace(r.operatorNamespace), client.MatchingLabels(labels), client.PropagationPolicy(metav1.DeletePropagationForeground), } - if err = a.DeleteAllOf(ctx, &appsv1.StatefulSet{}, options...); err != nil { + if err = r.DeleteAllOf(ctx, &appsv1.StatefulSet{}, options...); err != nil { return false, fmt.Errorf("deleting statefulset: %w", err) } @@ -314,7 +286,7 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, tailnet string, lo return false, nil } - devices, err := a.DeviceInfo(ctx, labels, logger) + devices, err := r.DeviceInfo(ctx, labels, logger) if err != nil { return false, fmt.Errorf("getting device info: %w", err) } @@ -322,33 +294,36 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, tailnet string, lo for _, dev := range devices { if dev.id != "" { logger.Debugf("deleting device %s from control", string(dev.id)) - if err = tailscaleClient.DeleteDevice(ctx, string(dev.id)); err != nil { - if errResp, ok := errors.AsType[tailscale.ErrResponse](err); ok && errResp.Status == http.StatusNotFound { - logger.Debugf("device %s not found, likely because it has already been deleted from control", string(dev.id)) - } else { - return false, fmt.Errorf("deleting device: %w", err) - } - } else { - logger.Debugf("device %s deleted from control", string(dev.id)) + err = tsClient.Devices().Delete(ctx, string(dev.id)) + switch { + case tailscale.IsNotFound(err): + logger.Debugf("device %s not found, likely because it has already been deleted from control", string(dev.id)) + case err != nil: + 
return false, fmt.Errorf("deleting device: %w", err) } + + logger.Debugf("device %s deleted from control", string(dev.id)) } } - types := []client.Object{ + resourceTypes := []client.Object{ &corev1.Service{}, &corev1.Secret{}, } - for _, typ := range types { - if err := a.DeleteAllOf(ctx, typ, client.InNamespace(a.operatorNamespace), client.MatchingLabels(labels)); err != nil { + + for _, resourceType := range resourceTypes { + if err = r.DeleteAllOf(ctx, resourceType, client.InNamespace(r.operatorNamespace), client.MatchingLabels(labels)); err != nil { return false, err } } + mo := &metricsOpts{ proxyLabels: labels, - tsNamespace: a.operatorNamespace, + tsNamespace: r.operatorNamespace, proxyType: typ, } - if err = maybeCleanupMetricsResources(ctx, mo, a.Client); err != nil { + + if err = maybeCleanupMetricsResources(ctx, mo, r.Client); err != nil { return false, fmt.Errorf("error cleaning up metrics resources: %w", err) } @@ -382,12 +357,12 @@ func statefulSetNameBase(parent string) string { } } -func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig) (*corev1.Service, error) { +func (r *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig) (*corev1.Service, error) { nameBase := statefulSetNameBase(sts.ParentResourceName) hsvc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ GenerateName: nameBase, - Namespace: a.operatorNamespace, + Namespace: r.operatorNamespace, Labels: sts.ChildResourceLabels, }, Spec: corev1.ServiceSpec{ @@ -399,10 +374,10 @@ func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, l }, } logger.Debugf("reconciling headless service for StatefulSet") - return createOrUpdate(ctx, a.Client, a.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = hsvc.Spec }) + return createOrUpdate(ctx, r.Client, r.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = 
hsvc.Spec }) } -func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, tailscaleClient tsClient, loginUrl string, stsC *tailscaleSTSConfig, hsvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { +func (r *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, tsClient tsclient.Client, stsC *tailscaleSTSConfig, hsvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { secretNames := make([]string, stsC.Replicas) // Start by ensuring we have Secrets for the desired number of replicas. This will handle both creating and scaling @@ -411,7 +386,7 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, tailscale secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%d", hsvc.Name, i), - Namespace: a.operatorNamespace, + Namespace: r.operatorNamespace, Labels: stsC.ChildResourceLabels, }, } @@ -426,7 +401,7 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, tailscale secretNames[i] = secret.Name var orig *corev1.Secret // unmodified copy of secret - if err := a.Get(ctx, client.ObjectKeyFromObject(secret), secret); err == nil { + if err := r.Get(ctx, client.ObjectKeyFromObject(secret), secret); err == nil { logger.Debugf("secret %s/%s already exists", secret.GetNamespace(), secret.GetName()) orig = secret.DeepCopy() } else if !apierrors.IsNotFound(err) { @@ -437,21 +412,23 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, tailscale authKey string err error ) + if orig == nil { // Create API Key secret which is going to be used by the statefulset // to authenticate with Tailscale. 
logger.Debugf("creating authkey for new tailscale proxy") tags := stsC.Tags if len(tags) == 0 { - tags = a.defaultTags + tags = r.defaultTags } - authKey, err = newAuthKey(ctx, tailscaleClient, tags) + + authKey, err = newAuthKey(ctx, tsClient, tags) if err != nil { return nil, err } } - configs, err := tailscaledConfig(stsC, loginUrl, authKey, orig, hostname) + configs, err := tailscaledConfig(stsC, tsClient.LoginURL(), authKey, orig, hostname) if err != nil { return nil, fmt.Errorf("error creating tailscaled config: %w", err) } @@ -483,12 +460,12 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, tailscale if orig != nil && !apiequality.Semantic.DeepEqual(latest, orig) { logger.With("config", sanitizeConfig(latestConfig)).Debugf("patching the existing proxy Secret") - if err = a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil { + if err = r.Patch(ctx, secret, client.MergeFrom(orig)); err != nil { return nil, err } } else { logger.With("config", sanitizeConfig(latestConfig)).Debugf("creating a new Secret for the proxy") - if err = a.Create(ctx, secret); err != nil { + if err = r.Create(ctx, secret); err != nil { return nil, err } } @@ -497,7 +474,7 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, tailscale // Next, we check if we have additional secrets and remove them and their associated device. This happens when we // scale an StatefulSet down. 
var secrets corev1.SecretList - if err := a.List(ctx, &secrets, client.InNamespace(a.operatorNamespace), client.MatchingLabels(stsC.ChildResourceLabels)); err != nil { + if err := r.List(ctx, &secrets, client.InNamespace(r.operatorNamespace), client.MatchingLabels(stsC.ChildResourceLabels)); err != nil { return nil, err } @@ -517,16 +494,18 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, tailscale } if dev != nil && dev.id != "" { - err = tailscaleClient.DeleteDevice(ctx, string(dev.id)) - if errResp, ok := errors.AsType[*tailscale.ErrResponse](err); ok && errResp.Status == http.StatusNotFound { + err = tsClient.Devices().Delete(ctx, string(dev.id)) + switch { + case tailscale.IsNotFound(err): // This device has possibly already been deleted in the admin console. So we can ignore this // and move on to removing the secret. - } else if err != nil { + // Device already gone upstream: fall out of the switch (do NOT continue) so the stale Secret below is still deleted. + case err != nil: return nil, err } } - if err = a.Delete(ctx, &secret); err != nil { + if err = r.Delete(ctx, &secret); err != nil { return nil, err } } @@ -550,9 +529,9 @@ func sanitizeConfig(c ipn.ConfigVAlpha) ipn.ConfigVAlpha { // It retrieves info from a Kubernetes Secret labeled with the provided labels. Capver is cross-validated against the // Pod to ensure that it is the currently running Pod that set the capver. If the Pod or the Secret does not exist, the // returned capver is -1. Either of device ID, hostname and IPs can be empty string if not found in the Secret.
-func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map[string]string, logger *zap.SugaredLogger) ([]*device, error) { +func (r *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map[string]string, logger *zap.SugaredLogger) ([]*device, error) { var secrets corev1.SecretList - if err := a.List(ctx, &secrets, client.InNamespace(a.operatorNamespace), client.MatchingLabels(childLabels)); err != nil { + if err := r.List(ctx, &secrets, client.InNamespace(r.operatorNamespace), client.MatchingLabels(childLabels)); err != nil { return nil, err } @@ -560,7 +539,7 @@ func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map for _, sec := range secrets.Items { podUID := "" pod := new(corev1.Pod) - err := a.Get(ctx, types.NamespacedName{Namespace: sec.Namespace, Name: sec.Name}, pod) + err := r.Get(ctx, types.NamespacedName{Namespace: sec.Namespace, Name: sec.Name}, pod) switch { case apierrors.IsNotFound(err): // If the Pod is not found, we won't have its UID. 
We can still get the device information but the @@ -633,22 +612,18 @@ func deviceInfo(sec *corev1.Secret, podUID string, log *zap.SugaredLogger) (dev return dev, nil } -func newAuthKey(ctx context.Context, tsClient tsClient, tags []string) (string, error) { - caps := tailscale.KeyCapabilities{ - Devices: tailscale.KeyDeviceCapabilities{ - Create: tailscale.KeyDeviceCreateCapabilities{ - Reusable: false, - Preauthorized: true, - Tags: tags, - }, - }, - } +func newAuthKey(ctx context.Context, client tsclient.Client, tags []string) (string, error) { + var caps tailscale.KeyCapabilities + caps.Devices.Create.Reusable = false + caps.Devices.Create.Preauthorized = true + caps.Devices.Create.Tags = tags - key, _, err := tsClient.CreateKey(ctx, caps) + key, err := client.Keys().CreateAuthKey(ctx, tailscale.CreateKeyRequest{Capabilities: caps}) if err != nil { return "", err } - return key, nil + + return key.Key, nil } //go:embed deploy/manifests/proxy.yaml @@ -657,7 +632,7 @@ var proxyYaml []byte //go:embed deploy/manifests/userspace-proxy.yaml var userspaceProxyYaml []byte -func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecrets []string) (*appsv1.StatefulSet, error) { +func (r *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecrets []string) (*appsv1.StatefulSet, error) { ss := new(appsv1.StatefulSet) if sts.ServeConfig != nil && sts.ForwardClusterTrafficViaL7IngressProxy != true { // If forwarding cluster traffic via is required we need non-userspace + NET_ADMIN + forwarding if err := yaml.Unmarshal(userspaceProxyYaml, &ss); err != nil { @@ -670,17 +645,17 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S for i := range ss.Spec.Template.Spec.InitContainers { c := &ss.Spec.Template.Spec.InitContainers[i] if c.Name == "sysctler" { - 
c.Image = a.proxyImage + c.Image = r.proxyImage break } } } pod := &ss.Spec.Template container := &pod.Spec.Containers[0] - container.Image = a.proxyImage + container.Image = r.proxyImage ss.ObjectMeta = metav1.ObjectMeta{ Name: headlessSvc.Name, - Namespace: a.operatorNamespace, + Namespace: r.operatorNamespace, } for key, val := range sts.ChildResourceLabels { mak.Set(&ss.ObjectMeta.Labels, key, val) @@ -748,13 +723,13 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S }) } - if a.tsFirewallMode != "" { + if r.tsFirewallMode != "" { container.Env = append(container.Env, corev1.EnvVar{ Name: "TS_DEBUG_FIREWALL_MODE", - Value: a.tsFirewallMode, + Value: r.tsFirewallMode, }) } - pod.Spec.PriorityClassName = a.proxyPriorityClassName + pod.Spec.PriorityClassName = r.proxyPriorityClassName // Ingress/egress proxy configuration options. if sts.ClusterTargetIP != "" { @@ -829,7 +804,7 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S s.ObjectMeta.Labels = ss.Labels s.ObjectMeta.Annotations = ss.Annotations } - return createOrUpdate(ctx, a.Client, a.operatorNamespace, ss, updateSS) + return createOrUpdate(ctx, r.Client, r.operatorNamespace, ss, updateSS) } func appInfoForProxy(cfg *tailscaleSTSConfig) (string, error) { diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go index e1891a4a9..29d1a1ebd 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -10,7 +10,6 @@ import ( "encoding/json" "errors" "fmt" - "net/http" "net/netip" "reflect" "slices" @@ -27,11 +26,12 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale/v2" - "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/tsclient" 
"tailscale.com/kube/ingressservices" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" @@ -57,7 +57,7 @@ type HAServiceReconciler struct { isDefaultLoadBalancer bool recorder record.EventRecorder logger *zap.SugaredLogger - tsClient tsClient + clients ClientProvider tsNamespace string defaultTags []string operatorID string // stableID of the operator's Tailscale device @@ -121,7 +121,7 @@ func (r *HAServiceReconciler) Reconcile(ctx context.Context, req reconcile.Reque return res, nil } - tailscaleClient, err := clientFromProxyGroup(ctx, r.Client, pg, r.tsNamespace, r.tsClient) + tsClient, err := r.clients.For(pg.Spec.Tailnet) if err != nil { return res, fmt.Errorf("failed to get tailscale client: %w", err) } @@ -131,7 +131,7 @@ func (r *HAServiceReconciler) Reconcile(ctx context.Context, req reconcile.Reque if !svc.DeletionTimestamp.IsZero() || !r.isTailscaleService(svc) { logger.Debugf("Service is being deleted or is (no longer) referring to Tailscale ingress/egress, ensuring any created resources are cleaned up") - _, err = r.maybeCleanup(ctx, hostname, svc, logger, tailscaleClient) + _, err = r.maybeCleanup(ctx, hostname, svc, logger, tsClient) return res, err } @@ -139,7 +139,7 @@ func (r *HAServiceReconciler) Reconcile(ctx context.Context, req reconcile.Reque // is the case, we reconcile the Ingress one more time to ensure that concurrent updates to the Tailscale Service in a // multi-cluster Ingress setup have not resulted in another actor overwriting our Tailscale Service update. 
needsRequeue := false - needsRequeue, err = r.maybeProvision(ctx, hostname, svc, pg, logger, tailscaleClient) + needsRequeue, err = r.maybeProvision(ctx, hostname, svc, pg, logger, tsClient) if err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { logger.Infof("optimistic lock error, retrying: %s", err) @@ -162,7 +162,7 @@ func (r *HAServiceReconciler) Reconcile(ctx context.Context, req reconcile.Reque // If a Tailscale Service exists, but does not have an owner reference from any operator, we error // out assuming that this is an owner reference created by an unknown actor. // Returns true if the operation resulted in a Tailscale Service update. -func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname string, svc *corev1.Service, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, tsClient tsClient) (svcsChanged bool, err error) { +func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname string, svc *corev1.Service, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, tsClient tsclient.Client) (svcsChanged bool, err error) { oldSvcStatus := svc.Status.DeepCopy() defer func() { if !apiequality.Semantic.DeepEqual(oldSvcStatus, &svc.Status) { @@ -209,8 +209,8 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // 2. Ensure that there isn't a Tailscale Service with the same hostname // already created and not owned by this Service. 
serviceName := tailcfg.ServiceName("svc:" + hostname) - existingTSSvc, err := tsClient.GetVIPService(ctx, serviceName) - if err != nil && !isErrorTailscaleServiceNotFound(err) { + existingTSSvc, err := tsClient.VIPServices().Get(ctx, serviceName.String()) + if err != nil && !tailscale.IsNotFound(err) { return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) } @@ -233,8 +233,8 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin tags = strings.Split(tstr, ",") } - tsSvc := &tailscale.VIPService{ - Name: serviceName, + tsSvc := tailscale.VIPService{ + Name: serviceName.String(), Tags: tags, Ports: []string{"do-not-validate"}, // we don't want to validate ports Comment: managedTSServiceComment, @@ -249,12 +249,13 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // with the same generation number has been reconciled ~more than N times and stop attempting to apply updates. if existingTSSvc == nil || !reflect.DeepEqual(tsSvc.Tags, existingTSSvc.Tags) || - !ownersAreSetAndEqual(tsSvc, existingTSSvc) { + !ownersAreSetAndEqual(tsSvc, *existingTSSvc) { logger.Infof("Ensuring Tailscale Service exists and is up to date") - if err := tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { + if err = tsClient.VIPServices().CreateOrUpdate(ctx, tsSvc); err != nil { return false, fmt.Errorf("error creating Tailscale Service: %w", err) } - existingTSSvc = tsSvc + + existingTSSvc = &tsSvc } cm, cfgs, err := ingressSvcsConfigs(ctx, r.Client, pg.Name, r.tsNamespace) @@ -266,12 +267,12 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin return false, nil } - if existingTSSvc.Addrs == nil { - existingTSSvc, err = tsClient.GetVIPService(ctx, tsSvc.Name) - if err != nil { + if len(existingTSSvc.Addrs) == 0 { + existingTSSvc, err = tsClient.VIPServices().Get(ctx, tsSvc.Name) + switch { + case err != nil: return false, fmt.Errorf("error getting Tailscale Service: 
%w", err) - } - if existingTSSvc.Addrs == nil { + case len(existingTSSvc.Addrs) == 0: // TODO(irbekrm): this should be a retry return false, fmt.Errorf("unexpected: Tailscale Service addresses not populated") } @@ -374,7 +375,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // Service is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the Tailscale Service is only // deleted if it does not contain any other owner references. If it does the cleanup only removes the owner reference // corresponding to this Service. -func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, svc *corev1.Service, logger *zap.SugaredLogger, tsClient tsClient) (svcChanged bool, err error) { +func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, svc *corev1.Service, logger *zap.SugaredLogger, tsClient tsclient.Client) (svcChanged bool, err error) { logger.Debugf("Ensuring any resources for Service are cleaned up") ix := slices.Index(svc.Finalizers, svcPGFinalizerName) if ix < 0 { @@ -392,7 +393,7 @@ func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, serviceName := tailcfg.ServiceName("svc:" + hostname) // 1. Clean up the Tailscale Service. - svcChanged, err = cleanupTailscaleService(ctx, tsClient, serviceName, r.operatorID, logger) + svcChanged, err = cleanupTailscaleService(ctx, tsClient, serviceName.String(), r.operatorID, logger) if err != nil { return false, fmt.Errorf("error deleting Tailscale Service: %w", err) } @@ -425,7 +426,7 @@ func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, // Tailscale Services that are associated with the provided ProxyGroup and no longer managed this operator's instance are deleted, if not owned by other operator instances, else the owner reference is cleaned up. // Returns true if the operation resulted in existing Tailscale Service updates (owner reference removal). 
-func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger, tsClient tsClient) (svcsChanged bool, err error) { +func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger, tsClient tsclient.Client) (svcsChanged bool, err error) { cm, config, err := ingressSvcsConfigs(ctx, r.Client, proxyGroupName, r.tsNamespace) if err != nil { return false, fmt.Errorf("failed to get ingress service config: %s", err) @@ -453,7 +454,7 @@ func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } - svcsChanged, err = cleanupTailscaleService(ctx, tsClient, tailcfg.ServiceName(tsSvcName), r.operatorID, logger) + svcsChanged, err = cleanupTailscaleService(ctx, tsClient, tsSvcName, r.operatorID, logger) if err != nil { return false, fmt.Errorf("deleting Tailscale Service %q: %w", tsSvcName, err) } @@ -517,29 +518,28 @@ func (r *HAServiceReconciler) shouldExposeClusterIP(svc *corev1.Service) bool { // If a Tailscale Service is found, but contains other owner references, only removes this operator's owner reference. // If a Tailscale Service by the given name is not found or does not contain this operator's owner reference, do nothing. // It returns true if an existing Tailscale Service was updated to remove owner reference, as well as any error that occurred. 
-func cleanupTailscaleService(ctx context.Context, tsClient tsClient, name tailcfg.ServiceName, operatorID string, logger *zap.SugaredLogger) (updated bool, err error) { - svc, err := tsClient.GetVIPService(ctx, name) - if err != nil { - errResp, ok := errors.AsType[tailscale.ErrResponse](err) - if ok && errResp.Status == http.StatusNotFound { - return false, nil - } - if !ok { - return false, fmt.Errorf("unexpected error getting Tailscale Service %q: %w", name.String(), err) - } - - return false, fmt.Errorf("error getting Tailscale Service: %w", err) +func cleanupTailscaleService(ctx context.Context, tsClient tsclient.Client, name string, operatorID string, logger *zap.SugaredLogger) (updated bool, err error) { + svc, err := tsClient.VIPServices().Get(ctx, name) + switch { + case tailscale.IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("unexpected error getting Tailscale Service %q: %w", name, err) } + if svc == nil { return false, nil } + o, err := parseOwnerAnnotation(svc) if err != nil { return false, fmt.Errorf("error parsing Tailscale Service owner annotation: %w", err) } + if o == nil || len(o.OwnerRefs) == 0 { return false, nil } + // Comparing with the operatorID only means that we will not be able to // clean up Tailscale Services in cases where the operator was deleted from the // cluster before deleting the Ingress. 
Perhaps the comparison could be @@ -550,18 +550,22 @@ func cleanupTailscaleService(ctx context.Context, tsClient tsClient, name tailcf if ix == -1 { return false, nil } + if len(o.OwnerRefs) == 1 { logger.Infof("Deleting Tailscale Service %q", name) - return false, tsClient.DeleteVIPService(ctx, name) + return false, tsClient.VIPServices().Delete(ctx, name) } + o.OwnerRefs = slices.Delete(o.OwnerRefs, ix, ix+1) logger.Infof("Updating Tailscale Service %q", name) - json, err := json.Marshal(o) + + data, err := json.Marshal(o) if err != nil { return false, fmt.Errorf("error marshalling updated Tailscale Service owner reference: %w", err) } - svc.Annotations[ownerAnnotation] = string(json) - return true, tsClient.CreateOrUpdateVIPService(ctx, svc) + + svc.Annotations[ownerAnnotation] = string(data) + return true, tsClient.VIPServices().CreateOrUpdate(ctx, *svc) } func (r *HAServiceReconciler) backendRoutesSetup(ctx context.Context, serviceName, replicaName string, wantsCfg *ingressservices.Config, logger *zap.SugaredLogger) (bool, error) { diff --git a/cmd/k8s-operator/svc-for-pg_test.go b/cmd/k8s-operator/svc-for-pg_test.go index 7a767a9b8..455d3363c 100644 --- a/cmd/k8s-operator/svc-for-pg_test.go +++ b/cmd/k8s-operator/svc-for-pg_test.go @@ -22,15 +22,15 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "tailscale.com/client/tailscale/v2" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/kube/ingressservices" "tailscale.com/kube/kubetypes" "tailscale.com/tstest" "tailscale.com/util/mak" - - "tailscale.com/tailcfg" ) func TestServicePGReconciler(t *testing.T) { @@ -102,11 +102,11 @@ func TestServicePGReconciler_UpdateHostname(t *testing.T) { verifyTailscaleService(t, ft, fmt.Sprintf("svc:%s", hostname), []string{"do-not-validate"}) verifyTailscaledConfig(t, fc, "test-pg", 
[]string{fmt.Sprintf("svc:%s", hostname)}) - _, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName(fmt.Sprintf("svc:default-%s", svc.Name))) + _, err := ft.VIPServices().Get(context.Background(), fmt.Sprintf("svc:default-%s", svc.Name)) if err == nil { t.Fatalf("svc:default-%s not cleaned up", svc.Name) } - if !isErrorTailscaleServiceNotFound(err) { + if !tailscale.IsNotFound(err) { t.Fatalf("unexpected error: %v", err) } } @@ -188,7 +188,9 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien t.Fatal(err) } - ft := &fakeTSClient{} + ft := &fakeTSClient{ + vipServices: make(map[string]tailscale.VIPService), + } zl, err := zap.NewDevelopment() if err != nil { t.Fatal(err) @@ -197,7 +199,7 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien cl := tstest.NewClock(tstest.ClockOpts{}) svcPGR := &HAServiceReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), clock: cl, defaultTags: []string{"tag:k8s"}, tsNamespace: "operator-ns", @@ -275,22 +277,22 @@ func TestServicePGReconciler_MultiCluster(t *testing.T) { if i == 0 { ft = fti } else { - pgr.tsClient = ft + pgr.clients = tsclient.NewProvider(ft) } svc, _ := setupTestService(t, "test-multi-cluster", "", "4.3.2.1", fc, stateSecret) expectReconciled(t, pgr, "default", svc.Name) - tsSvcs, err := ft.ListVIPServices(context.Background()) + tsSvcs, err := ft.VIPServices().List(t.Context()) if err != nil { t.Fatalf("getting Tailscale Service: %v", err) } - if len(tsSvcs.VIPServices) != 1 { - t.Fatalf("unexpected number of Tailscale Services (%d)", len(tsSvcs.VIPServices)) + if len(tsSvcs) != 1 { + t.Fatalf("unexpected number of Tailscale Services (%d)", len(tsSvcs)) } - for _, svc := range tsSvcs.VIPServices { + for _, svc := range tsSvcs { t.Logf("found Tailscale Service with name %q", svc.Name) } } @@ -322,9 +324,9 @@ func TestIgnoreRegularService(t *testing.T) { verifyTailscaledConfig(t, fc, "test-pg", nil) - tsSvcs, err := 
ft.ListVIPServices(context.Background()) + tsSvcs, err := ft.VIPServices().List(t.Context()) if err == nil { - if len(tsSvcs.VIPServices) > 0 { + if len(tsSvcs) > 0 { t.Fatal("unexpected Tailscale Services found") } } diff --git a/cmd/k8s-operator/svc_test.go b/cmd/k8s-operator/svc_test.go index 3a6ea044d..677e9db10 100644 --- a/cmd/k8s-operator/svc_test.go +++ b/cmd/k8s-operator/svc_test.go @@ -16,7 +16,9 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/kube/kubetypes" "tailscale.com/tstest" ) @@ -47,7 +49,7 @@ func TestService_DefaultProxyClassInitiallyNotReady(t *testing.T) { Client: fc, ssr: &tailscaleSTSReconciler{ Client: fc, - tsClient: ft, + clients: tsclient.NewProvider(ft), defaultTags: []string{"tag:k8s"}, operatorNamespace: "operator-ns", proxyImage: "tailscale/tailscale", diff --git a/cmd/k8s-operator/tailnet.go b/cmd/k8s-operator/tailnet.go deleted file mode 100644 index 439489f75..000000000 --- a/cmd/k8s-operator/tailnet.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) Tailscale Inc & contributors -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !plan9 - -package main - -import ( - "context" - "fmt" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/clientcredentials" - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - "tailscale.com/internal/client/tailscale" - "tailscale.com/ipn" - operatorutils "tailscale.com/k8s-operator" - tsapi "tailscale.com/k8s-operator/apis/v1alpha1" -) - -func clientForTailnet(ctx context.Context, cl client.Client, namespace, name string) (tsClient, string, error) { - var tn tsapi.Tailnet - if err := cl.Get(ctx, client.ObjectKey{Name: name}, &tn); err != nil { - return nil, "", fmt.Errorf("failed to get tailnet %q: %w", name, err) - } - - if !operatorutils.TailnetIsReady(&tn) { - return nil, "", 
fmt.Errorf("tailnet %q is not ready", name) - } - - var secret corev1.Secret - if err := cl.Get(ctx, client.ObjectKey{Name: tn.Spec.Credentials.SecretName, Namespace: namespace}, &secret); err != nil { - return nil, "", fmt.Errorf("failed to get Secret %q in namespace %q: %w", tn.Spec.Credentials.SecretName, namespace, err) - } - - baseURL := ipn.DefaultControlURL - if tn.Spec.LoginURL != "" { - baseURL = tn.Spec.LoginURL - } - - credentials := clientcredentials.Config{ - ClientID: string(secret.Data["client_id"]), - ClientSecret: string(secret.Data["client_secret"]), - TokenURL: baseURL + "/api/v2/oauth/token", - } - - source := credentials.TokenSource(ctx) - httpClient := oauth2.NewClient(ctx, source) - - ts := tailscale.NewClient(defaultTailnet, nil) - ts.UserAgent = "tailscale-k8s-operator" - ts.HTTPClient = httpClient - ts.BaseURL = baseURL - - return ts, baseURL, nil -} - -func clientFromProxyGroup(ctx context.Context, cl client.Client, pg *tsapi.ProxyGroup, namespace string, def tsClient) (tsClient, error) { - if pg.Spec.Tailnet == "" { - return def, nil - } - - tailscaleClient, _, err := clientForTailnet(ctx, cl, namespace, pg.Spec.Tailnet) - if err != nil { - return nil, err - } - - return tailscaleClient, nil -} diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 36b608ef6..074d92094 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -9,6 +9,7 @@ import ( "context" "encoding/json" "fmt" + "maps" "net/http" "net/netip" "path" @@ -31,12 +32,12 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale/v2" - "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/kube/kubetypes" - "tailscale.com/tailcfg" "tailscale.com/util/mak" ) @@ -836,12 +837,131 @@ func 
expectEvents(t *testing.T, rec *record.FakeRecorder, wantsEvents []string) } } -type fakeTSClient struct { - sync.Mutex - keyRequests []tailscale.KeyCapabilities - deleted []string - vipServices map[tailcfg.ServiceName]*tailscale.VIPService +type ( + fakeTSClient struct { + sync.Mutex + loginURL string + keyRequests []tailscale.KeyCapabilities + deleted []string + devices []tailscale.Device + vipServices map[string]tailscale.VIPService + } + + fakeVIPServices struct { + mu sync.RWMutex + vipServices map[string]tailscale.VIPService + } + + fakeKeys struct { + keyRequests *[]tailscale.KeyCapabilities + } + + fakeDevices struct { + deleted *[]string + devices *[]tailscale.Device + } +) + +func (c *fakeTSClient) VIPServices() tsclient.VIPServiceResource { + return &fakeVIPServices{ + vipServices: c.vipServices, + } } + +func (m *fakeVIPServices) List(_ context.Context) ([]tailscale.VIPService, error) { + m.mu.RLock() + defer m.mu.RUnlock() + + if len(m.vipServices) == 0 { + return nil, tailscale.APIError{Status: http.StatusNotFound} + } + + return slices.Collect(maps.Values(m.vipServices)), nil +} + +func (m *fakeVIPServices) Delete(_ context.Context, name string) error { + m.mu.Lock() + defer m.mu.Unlock() + + if _, ok := m.vipServices[name]; !ok { + return tailscale.APIError{Status: http.StatusNotFound} + } + + delete(m.vipServices, name) + return nil +} + +func (m *fakeVIPServices) Get(_ context.Context, name string) (*tailscale.VIPService, error) { + if svc, ok := m.vipServices[name]; ok { + return &svc, nil + } + + return nil, tailscale.APIError{Status: http.StatusNotFound} +} + +func (m *fakeVIPServices) CreateOrUpdate(_ context.Context, svc tailscale.VIPService) error { + m.mu.Lock() + defer m.mu.Unlock() + + if svc.Addrs == nil { + svc.Addrs = []string{vipTestIP} + } + + m.vipServices[svc.Name] = svc + return nil +} + +func (c *fakeTSClient) Devices() tsclient.DeviceResource { + return &fakeDevices{ + deleted: &c.deleted, + devices: &c.devices, + } +} + +func 
(m *fakeDevices) Delete(_ context.Context, id string) error { + *m.deleted = append(*m.deleted, id) + + return nil +} + +func (m *fakeDevices) List(_ context.Context, _ ...tailscale.ListDevicesOptions) ([]tailscale.Device, error) { + return *m.devices, nil +} + +func (m *fakeDevices) Get(_ context.Context, id string) (*tailscale.Device, error) { + if m.devices == nil { + return nil, tailscale.APIError{Status: http.StatusNotFound} + } + + for _, dev := range *m.devices { + if dev.ID == id { + return &dev, nil + } + } + + return nil, tailscale.APIError{Status: http.StatusNotFound} +} + +func (c *fakeTSClient) Keys() tsclient.KeyResource { + return &fakeKeys{ + keyRequests: &c.keyRequests, + } +} + +func (m *fakeKeys) CreateAuthKey(_ context.Context, ckr tailscale.CreateKeyRequest) (*tailscale.Key, error) { + *m.keyRequests = append(*m.keyRequests, ckr.Capabilities) + + return &tailscale.Key{Key: "new-authkey"}, nil +} + +func (m *fakeKeys) List(_ context.Context, _ bool) ([]tailscale.Key, error) { + return nil, nil +} + +func (c *fakeTSClient) LoginURL() string { + return c.loginURL +} + type fakeTSNetServer struct { certDomains []string } @@ -850,48 +970,6 @@ func (f *fakeTSNetServer) CertDomains() []string { return f.certDomains } -func (c *fakeTSClient) CreateKey(ctx context.Context, caps tailscale.KeyCapabilities) (string, *tailscale.Key, error) { - c.Lock() - defer c.Unlock() - c.keyRequests = append(c.keyRequests, caps) - k := &tailscale.Key{ - ID: "key", - Created: time.Now(), - Capabilities: caps, - } - return "new-authkey", k, nil -} - -func (c *fakeTSClient) Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error) { - return &tailscale.Device{ - DeviceID: deviceID, - Hostname: "hostname-" + deviceID, - Addresses: []string{ - "1.2.3.4", - "::1", - }, - }, nil -} - -func (c *fakeTSClient) DeleteDevice(ctx context.Context, deviceID string) error { - c.Lock() - defer
c.Unlock() - c.deleted = append(c.deleted, deviceID) - return nil -} - -func (c *fakeTSClient) KeyRequests() []tailscale.KeyCapabilities { - c.Lock() - defer c.Unlock() - return c.keyRequests -} - -func (c *fakeTSClient) Deleted() []string { - c.Lock() - defer c.Unlock() - return c.deleted -} - func removeResourceReqs(sts *appsv1.StatefulSet) { if sts != nil { sts.Spec.Template.Spec.Resources = nil @@ -935,53 +1013,3 @@ func removeAuthKeyIfExistsModifier(t *testing.T) func(s *corev1.Secret) { } } } - -func (c *fakeTSClient) GetVIPService(ctx context.Context, name tailcfg.ServiceName) (*tailscale.VIPService, error) { - c.Lock() - defer c.Unlock() - if c.vipServices == nil { - return nil, tailscale.ErrResponse{Status: http.StatusNotFound} - } - svc, ok := c.vipServices[name] - if !ok { - return nil, tailscale.ErrResponse{Status: http.StatusNotFound} - } - return svc, nil -} - -func (c *fakeTSClient) ListVIPServices(ctx context.Context) (*tailscale.VIPServiceList, error) { - c.Lock() - defer c.Unlock() - if c.vipServices == nil { - return nil, &tailscale.ErrResponse{Status: http.StatusNotFound} - } - result := &tailscale.VIPServiceList{} - for _, svc := range c.vipServices { - result.VIPServices = append(result.VIPServices, *svc) - } - return result, nil -} - -func (c *fakeTSClient) CreateOrUpdateVIPService(ctx context.Context, svc *tailscale.VIPService) error { - c.Lock() - defer c.Unlock() - if c.vipServices == nil { - c.vipServices = make(map[tailcfg.ServiceName]*tailscale.VIPService) - } - - if svc.Addrs == nil { - svc.Addrs = []string{vipTestIP} - } - - c.vipServices[svc.Name] = svc - return nil -} - -func (c *fakeTSClient) DeleteVIPService(ctx context.Context, name tailcfg.ServiceName) error { - c.Lock() - defer c.Unlock() - if c.vipServices != nil { - delete(c.vipServices, name) - } - return nil -} diff --git a/cmd/k8s-operator/tsclient.go b/cmd/k8s-operator/tsclient.go index 063c2f768..e893fa341 100644 --- a/cmd/k8s-operator/tsclient.go +++ 
b/cmd/k8s-operator/tsclient.go @@ -8,7 +8,7 @@ package main import ( "context" "fmt" - "net/http" + "net/url" "os" "sync" "time" @@ -16,16 +16,13 @@ import ( "go.uber.org/zap" "golang.org/x/oauth2" "golang.org/x/oauth2/clientcredentials" - "tailscale.com/internal/client/tailscale" + "tailscale.com/client/tailscale/v2" + "tailscale.com/ipn" - "tailscale.com/tailcfg" ) -// defaultTailnet is a value that can be used in Tailscale API calls instead of tailnet name to indicate that the API -// call should be performed on the default tailnet for the provided credentials. const ( - defaultTailnet = "-" - oidcJWTPath = "/var/run/secrets/tailscale/serviceaccount/token" + oidcJWTPath = "/var/run/secrets/tailscale/serviceaccount/token" ) func newTSClient(logger *zap.SugaredLogger, clientID, clientIDPath, clientSecretPath, loginServer string) (*tailscale.Client, error) { @@ -34,24 +31,31 @@ func newTSClient(logger *zap.SugaredLogger, clientID, clientIDPath, clientSecret baseURL = loginServer } - var httpClient *http.Client + base, err := url.Parse(baseURL) + if err != nil { + return nil, err + } + + client := &tailscale.Client{ + UserAgent: "tailscale-k8s-operator", + BaseURL: base, + } + if clientID == "" { // Use static client credentials mounted to disk. 
- id, err := os.ReadFile(clientIDPath) + clientIDBytes, err := os.ReadFile(clientIDPath) if err != nil { return nil, fmt.Errorf("error reading client ID %q: %w", clientIDPath, err) } - secret, err := os.ReadFile(clientSecretPath) + clientSecretBytes, err := os.ReadFile(clientSecretPath) if err != nil { return nil, fmt.Errorf("reading client secret %q: %w", clientSecretPath, err) } - credentials := clientcredentials.Config{ - ClientID: string(id), - ClientSecret: string(secret), - TokenURL: fmt.Sprintf("%s%s", baseURL, "/api/v2/oauth/token"), + + client.Auth = &tailscale.OAuth{ + ClientID: string(clientIDBytes), + ClientSecret: string(clientSecretBytes), } - tokenSrc := credentials.TokenSource(context.Background()) - httpClient = oauth2.NewClient(context.Background(), tokenSrc) } else { // Use workload identity federation. tokenSrc := &jwtTokenSource{ @@ -62,34 +66,21 @@ func newTSClient(logger *zap.SugaredLogger, clientID, clientIDPath, clientSecret TokenURL: fmt.Sprintf("%s%s", baseURL, "/api/v2/oauth/token-exchange"), }, } - httpClient = &http.Client{ - Transport: &oauth2.Transport{ - Source: tokenSrc, + + client.Auth = &tailscale.IdentityFederation{ + ClientID: os.Getenv("TAILSCALE_OAUTH_CLIENT_ID"), + IDTokenFunc: func() (string, error) { + token, err := tokenSrc.Token() + if err != nil { + return "", err + } + + return token.AccessToken, nil }, } } - c := tailscale.NewClient(defaultTailnet, nil) - c.UserAgent = "tailscale-k8s-operator" - c.HTTPClient = httpClient - if loginServer != "" { - c.BaseURL = loginServer - } - return c, nil -} - -type tsClient interface { - CreateKey(ctx context.Context, caps tailscale.KeyCapabilities) (string, *tailscale.Key, error) - Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error) - DeleteDevice(ctx context.Context, nodeStableID string) error - // GetVIPService is a method for getting a Tailscale Service. VIPService is the original name for Tailscale Service. 
- GetVIPService(ctx context.Context, name tailcfg.ServiceName) (*tailscale.VIPService, error) - // ListVIPServices is a method for listing all Tailscale Services. VIPService is the original name for Tailscale Service. - ListVIPServices(ctx context.Context) (*tailscale.VIPServiceList, error) - // CreateOrUpdateVIPService is a method for creating or updating a Tailscale Service. - CreateOrUpdateVIPService(ctx context.Context, svc *tailscale.VIPService) error - // DeleteVIPService is a method for deleting a Tailscale Service. - DeleteVIPService(ctx context.Context, name tailcfg.ServiceName) error + return client, nil } // jwtTokenSource implements the [oauth2.TokenSource] interface, but with the diff --git a/cmd/k8s-operator/tsclient_test.go b/cmd/k8s-operator/tsclient_test.go deleted file mode 100644 index c08705c78..000000000 --- a/cmd/k8s-operator/tsclient_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (c) Tailscale Inc & contributors -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !plan9 - -package main - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "os" - "path/filepath" - "testing" - - "go.uber.org/zap" - "golang.org/x/oauth2" -) - -func TestNewStaticClient(t *testing.T) { - const ( - clientIDFile = "client-id" - clientSecretFile = "client-secret" - ) - - tmp := t.TempDir() - clientIDPath := filepath.Join(tmp, clientIDFile) - if err := os.WriteFile(clientIDPath, []byte("test-client-id"), 0600); err != nil { - t.Fatalf("error writing test file %q: %v", clientIDPath, err) - } - clientSecretPath := filepath.Join(tmp, clientSecretFile) - if err := os.WriteFile(clientSecretPath, []byte("test-client-secret"), 0600); err != nil { - t.Fatalf("error writing test file %q: %v", clientSecretPath, err) - } - - srv := testAPI(t, 3600) - cl, err := newTSClient(zap.NewNop().Sugar(), "", clientIDPath, clientSecretPath, srv.URL) - if err != nil { - t.Fatalf("error creating Tailscale client: %v", err) - } - - resp, err := 
cl.HTTPClient.Get(srv.URL) - if err != nil { - t.Fatalf("error making test API call: %v", err) - } - defer resp.Body.Close() - - got, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("error reading response body: %v", err) - } - want := "Bearer " + testToken("/api/v2/oauth/token", "test-client-id", "test-client-secret", "") - if string(got) != want { - t.Errorf("got %q; want %q", got, want) - } -} - -func TestNewWorkloadIdentityClient(t *testing.T) { - // 5 seconds is within expiryDelta leeway, so the access token will - // immediately be considered expired and get refreshed on each access. - srv := testAPI(t, 5) - cl, err := newTSClient(zap.NewNop().Sugar(), "test-client-id", "", "", srv.URL) - if err != nil { - t.Fatalf("error creating Tailscale client: %v", err) - } - - // Modify the path where the JWT will be read from. - oauth2Transport, ok := cl.HTTPClient.Transport.(*oauth2.Transport) - if !ok { - t.Fatalf("expected oauth2.Transport, got %T", cl.HTTPClient.Transport) - } - jwtTokenSource, ok := oauth2Transport.Source.(*jwtTokenSource) - if !ok { - t.Fatalf("expected jwtTokenSource, got %T", oauth2Transport.Source) - } - tmp := t.TempDir() - jwtPath := filepath.Join(tmp, "token") - jwtTokenSource.jwtPath = jwtPath - - for _, jwt := range []string{"test-jwt", "updated-test-jwt"} { - if err := os.WriteFile(jwtPath, []byte(jwt), 0600); err != nil { - t.Fatalf("error writing test file %q: %v", jwtPath, err) - } - resp, err := cl.HTTPClient.Get(srv.URL) - if err != nil { - t.Fatalf("error making test API call: %v", err) - } - defer resp.Body.Close() - - got, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("error reading response body: %v", err) - } - if want := "Bearer " + testToken("/api/v2/oauth/token-exchange", "test-client-id", "", jwt); string(got) != want { - t.Errorf("got %q; want %q", got, want) - } - } -} - -func testAPI(t *testing.T, expirationSeconds int) *httptest.Server { - srv := httptest.NewServer(http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { - t.Logf("test server got request: %s %s", r.Method, r.URL.Path) - switch r.URL.Path { - case "/api/v2/oauth/token", "/api/v2/oauth/token-exchange": - id, secret, ok := r.BasicAuth() - if !ok { - t.Fatal("missing or invalid basic auth") - } - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(map[string]any{ - "access_token": testToken(r.URL.Path, id, secret, r.FormValue("jwt")), - "token_type": "Bearer", - "expires_in": expirationSeconds, - }); err != nil { - t.Fatalf("error writing response: %v", err) - } - case "/": - // Echo back the authz header for test assertions. - _, err := w.Write([]byte(r.Header.Get("Authorization"))) - if err != nil { - t.Fatalf("error writing response: %v", err) - } - default: - w.WriteHeader(http.StatusNotFound) - } - })) - t.Cleanup(srv.Close) - return srv -} - -func testToken(path, id, secret, jwt string) string { - return fmt.Sprintf("%s|%s|%s|%s", path, id, secret, jwt) -} diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index 0a7dbda58..7a96038ba 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -10,7 +10,6 @@ import ( "encoding/json" "errors" "fmt" - "net/http" "slices" "strconv" "strings" @@ -30,10 +29,11 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale/v2" - "tailscale.com/client/tailscale" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" "tailscale.com/tstime" @@ -60,8 +60,8 @@ type RecorderReconciler struct { log *zap.SugaredLogger recorder record.EventRecorder clock tstime.Clock + clients ClientProvider tsNamespace string - tsClient tsClient loginServer string mu sync.Mutex // protects following @@ -99,7 +99,7 @@ func (r 
*RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques return reconcile.Result{}, nil } - tailscaleClient, loginUrl, err := r.getClientAndLoginURL(ctx, tsr.Spec.Tailnet) + tsClient, err := r.clients.For(tsr.Spec.Tailnet) if err != nil { return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderTailnetUnavailable, err.Error()) } @@ -112,7 +112,7 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques return reconcile.Result{}, nil } - if done, err := r.maybeCleanup(ctx, tsr, tailscaleClient); err != nil { + if done, err := r.maybeCleanup(ctx, tsr, tsClient); err != nil { return reconcile.Result{}, err } else if !done { logger.Debugf("Recorder resource cleanup not yet finished, will retry...") @@ -144,7 +144,7 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderInvalid, message) } - if err = r.maybeProvision(ctx, tailscaleClient, loginUrl, tsr); err != nil { + if err = r.maybeProvision(ctx, tsClient, tsr); err != nil { reason := reasonRecorderCreationFailed message := fmt.Sprintf("failed creating Recorder: %s", err) if strings.Contains(err.Error(), optimisticLockErrorMsg) { @@ -162,30 +162,7 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques return setStatusReady(tsr, metav1.ConditionTrue, reasonRecorderCreated, reasonRecorderCreated) } -// getClientAndLoginURL returns the appropriate Tailscale client and resolved login URL -// for the given tailnet name. If no tailnet is specified, returns the default client -// and login server. Applies fallback to the operator's login server if the tailnet -// doesn't specify a custom login URL. 
-func (r *RecorderReconciler) getClientAndLoginURL(ctx context.Context, tailnetName string) (tsClient, - string, error) { - if tailnetName == "" { - return r.tsClient, r.loginServer, nil - } - - tc, loginUrl, err := clientForTailnet(ctx, r.Client, r.tsNamespace, tailnetName) - if err != nil { - return nil, "", err - } - - // Apply fallback if tailnet doesn't specify custom login URL - if loginUrl == "" { - loginUrl = r.loginServer - } - - return tc, loginUrl, nil -} - -func (r *RecorderReconciler) maybeProvision(ctx context.Context, tailscaleClient tsClient, loginUrl string, tsr *tsapi.Recorder) error { +func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsClient tsclient.Client, tsr *tsapi.Recorder) error { logger := r.logger(tsr.Name) r.mu.Lock() @@ -193,7 +170,7 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tailscaleClient gaugeRecorderResources.Set(int64(r.recorders.Len())) r.mu.Unlock() - if err := r.ensureAuthSecretsCreated(ctx, tailscaleClient, tsr); err != nil { + if err := r.ensureAuthSecretsCreated(ctx, tsClient, tsr); err != nil { return fmt.Errorf("error creating secrets: %w", err) } @@ -252,7 +229,7 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tailscaleClient return fmt.Errorf("error creating RoleBinding: %w", err) } - ss := tsrStatefulSet(tsr, r.tsNamespace, loginUrl) + ss := tsrStatefulSet(tsr, r.tsNamespace, tsClient.LoginURL()) _, err = createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { s.ObjectMeta.Labels = ss.ObjectMeta.Labels s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations @@ -271,13 +248,13 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tailscaleClient // If we have scaled the recorder down, we will have dangling state secrets // that we need to clean up. 
- if err = r.maybeCleanupSecrets(ctx, tailscaleClient, tsr); err != nil { + if err = r.maybeCleanupSecrets(ctx, tsClient, tsr); err != nil { return fmt.Errorf("error cleaning up Secrets: %w", err) } var devices []tsapi.RecorderTailnetDevice for replica := range replicas { - dev, ok, err := r.getDeviceInfo(ctx, tailscaleClient, tsr.Name, replica) + dev, ok, err := r.getDeviceInfo(ctx, tsClient, tsr.Name, replica) switch { case err != nil: return fmt.Errorf("failed to get device info: %w", err) @@ -342,7 +319,7 @@ func (r *RecorderReconciler) maybeCleanupServiceAccounts(ctx context.Context, ts return nil } -func (r *RecorderReconciler) maybeCleanupSecrets(ctx context.Context, tailscaleClient tsClient, tsr *tsapi.Recorder) error { +func (r *RecorderReconciler) maybeCleanupSecrets(ctx context.Context, tsClient tsclient.Client, tsr *tsapi.Recorder) error { options := []client.ListOption{ client.InNamespace(r.tsNamespace), client.MatchingLabels(tsrLabels("recorder", tsr.Name, nil)), @@ -382,11 +359,12 @@ func (r *RecorderReconciler) maybeCleanupSecrets(ctx context.Context, tailscaleC if ok { r.log.Debugf("deleting device %s", devicePrefs.Config.NodeID) - err = tailscaleClient.DeleteDevice(ctx, string(devicePrefs.Config.NodeID)) - if errResp, ok := errors.AsType[*tailscale.ErrResponse](err); ok && errResp.Status == http.StatusNotFound { - // This device has possibly already been deleted in the admin console. So we can ignore this - // and move on to removing the secret. - } else if err != nil { + err = tsClient.Devices().Delete(ctx, string(devicePrefs.Config.NodeID)) + switch { + case tailscale.IsNotFound(err): + // This device has possibly already been deleted in the admin console. So we can ignore this + // and move on to removing the secret. + case err != nil: return err } } @@ -402,7 +380,7 @@ func (r *RecorderReconciler) maybeCleanupSecrets(ctx context.Context, tailscaleC // maybeCleanup just deletes the device from the tailnet. 
All the kubernetes // resources linked to a Recorder will get cleaned up via owner references // (which we can use because they are all in the same namespace). -func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Recorder, tailscaleClient tsClient) (bool, error) { +func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Recorder, tsClient tsclient.Client) (bool, error) { logger := r.logger(tsr.Name) var replicas int32 = 1 @@ -426,12 +404,12 @@ func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Record nodeID := string(devicePrefs.Config.NodeID) logger.Debugf("deleting device %s from control", nodeID) - if err = tailscaleClient.DeleteDevice(ctx, nodeID); err != nil { - if errResp, ok := errors.AsType[tailscale.ErrResponse](err); ok && errResp.Status == http.StatusNotFound { - logger.Debugf("device %s not found, likely because it has already been deleted from control", nodeID) - continue - } - + err = tsClient.Devices().Delete(ctx, nodeID) + switch { + case tailscale.IsNotFound(err): + logger.Debugf("device %s not found, likely because it has already been deleted from control", nodeID) + continue + case err != nil: return false, fmt.Errorf("error deleting device: %w", err) } @@ -451,7 +429,7 @@ func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Record return true, nil } -func (r *RecorderReconciler) ensureAuthSecretsCreated(ctx context.Context, tailscaleClient tsClient, tsr *tsapi.Recorder) error { +func (r *RecorderReconciler) ensureAuthSecretsCreated(ctx context.Context, tsClient tsclient.Client, tsr *tsapi.Recorder) error { var replicas int32 = 1 if tsr.Spec.Replicas != nil { replicas = *tsr.Spec.Replicas @@ -479,7 +457,7 @@ func (r *RecorderReconciler) ensureAuthSecretsCreated(ctx context.Context, tails return fmt.Errorf("failed to get Secret %q: %w", key.Name, err) } - authKey, err := newAuthKey(ctx, tailscaleClient, tags.Stringify()) + authKey, err := newAuthKey(ctx, tsClient, 
tags.Stringify()) if err != nil { return err } @@ -581,7 +559,7 @@ func getDevicePrefs(secret *corev1.Secret) (prefs prefs, ok bool, err error) { return prefs, ok, nil } -func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tailscaleClient tsClient, tsrName string, replica int32) (d tsapi.RecorderTailnetDevice, ok bool, err error) { +func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsClient tsclient.Client, tsrName string, replica int32) (d tsapi.RecorderTailnetDevice, ok bool, err error) { secret, err := r.getStateSecret(ctx, tsrName, replica) if err != nil || secret == nil { return tsapi.RecorderTailnetDevice{}, false, err @@ -595,7 +573,7 @@ func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tailscaleClient // TODO(tomhjp): The profile info doesn't include addresses, which is why we // need the API. Should maybe update tsrecorder to write IPs to the state // Secret like containerboot does. - device, err := tailscaleClient.Device(ctx, string(prefs.Config.NodeID), nil) + device, err := tsClient.Devices().Get(ctx, string(prefs.Config.NodeID)) if err != nil { return tsapi.RecorderTailnetDevice{}, false, fmt.Errorf("failed to get device info from API: %w", err) } diff --git a/cmd/k8s-operator/tsrecorder_test.go b/cmd/k8s-operator/tsrecorder_test.go index d3ebc3bd5..231ba794a 100644 --- a/cmd/k8s-operator/tsrecorder_test.go +++ b/cmd/k8s-operator/tsrecorder_test.go @@ -21,9 +21,11 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "tailscale.com/client/tailscale/v2" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/tstest" ) @@ -48,14 +50,14 @@ func TestRecorder(t *testing.T) { WithObjects(tsr). WithStatusSubresource(tsr). 
Build() - tsClient := &fakeTSClient{} + tsClient := &fakeTSClient{loginURL: tsLoginServer} zl, _ := zap.NewDevelopment() fr := record.NewFakeRecorder(2) cl := tstest.NewClock(tstest.ClockOpts{}) reconciler := &RecorderReconciler{ tsNamespace: tsNamespace, Client: fc, - tsClient: tsClient, + clients: tsclient.NewProvider(tsClient), recorder: fr, log: zl.Sugar(), clock: cl, @@ -194,8 +196,8 @@ func TestRecorder(t *testing.T) { }) t.Run("populate_node_info_in_state_secret_and_see_it_appear_in_status", func(t *testing.T) { - const key = "profile-abc" + for replica := range *tsr.Spec.Replicas { bytes, err := json.Marshal(map[string]any{ "Config": map[string]any{ @@ -218,6 +220,24 @@ func TestRecorder(t *testing.T) { }) } + tsClient.devices = []tailscale.Device{ + { + ID: "node-0", + Hostname: "hostname-node-0", + Addresses: []string{"1.2.3.4", "::1"}, + }, + { + ID: "node-1", + Hostname: "hostname-node-1", + Addresses: []string{"1.2.3.4", "::1"}, + }, + { + ID: "node-2", + Hostname: "hostname-node-2", + Addresses: []string{"1.2.3.4", "::1"}, + }, + } + expectReconciled(t, reconciler, "", tsr.Name) tsr.Status.Devices = []tsapi.RecorderTailnetDevice{ { diff --git a/flake.nix b/flake.nix index abcd8f2f9..a81c801c2 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-39axT5Q0+fNTcMgZCMLMNfJEJN46wMaaKDgfI+Uj+Ps= +# nix-direnv cache busting line: sha256-d4rAAWD67rt6raRsJLmZij2e2Tiib8OlHNIlMktcEAU= diff --git a/go.mod b/go.mod index 8e31076a3..d37278c85 100644 --- a/go.mod +++ b/go.mod @@ -93,7 +93,7 @@ require ( github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 - github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a + github.com/tailscale/hujson v0.0.0-20260302212456-ecc657c15afd github.com/tailscale/mkctr v0.0.0-20260107121656-ea857e3e500b 
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc @@ -114,7 +114,7 @@ require ( golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b golang.org/x/mod v0.31.0 golang.org/x/net v0.48.0 - golang.org/x/oauth2 v0.33.0 + golang.org/x/oauth2 v0.36.0 golang.org/x/sync v0.19.0 golang.org/x/sys v0.40.0 golang.org/x/term v0.38.0 @@ -135,6 +135,7 @@ require ( sigs.k8s.io/kind v0.30.0 sigs.k8s.io/yaml v1.6.0 software.sslmate.com/src/go-pkcs12 v0.4.0 + tailscale.com/client/tailscale/v2 v2.9.0 ) require ( diff --git a/go.mod.sri b/go.mod.sri index 887edc0c6..d929157a0 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-39axT5Q0+fNTcMgZCMLMNfJEJN46wMaaKDgfI+Uj+Ps= +sha256-d4rAAWD67rt6raRsJLmZij2e2Tiib8OlHNIlMktcEAU= diff --git a/go.sum b/go.sum index b09fcdc72..e9720d777 100644 --- a/go.sum +++ b/go.sum @@ -1138,8 +1138,8 @@ github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e h1:tyUUge github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 h1:SRL6irQkKGQKKLzvQP/ke/2ZuB7Py5+XuqtOgSj+iMM= github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= -github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= -github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= +github.com/tailscale/hujson v0.0.0-20260302212456-ecc657c15afd h1:Rf9uhF1+VJ7ZHqxrG8pJ6YacmHvVCmByDmGbAWCc/gA= +github.com/tailscale/hujson v0.0.0-20260302212456-ecc657c15afd/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo= github.com/tailscale/mkctr v0.0.0-20260107121656-ea857e3e500b h1:QKqCnmp0qHWUHySySKjpuhZANzRn7XrTVZWUuUgJ3lQ= github.com/tailscale/mkctr 
v0.0.0-20260107121656-ea857e3e500b/go.mod h1:4st7fy3NTWcWsQdOC69JcHK4UXnncgcxSOvSR8aD8a0= github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= @@ -1415,8 +1415,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= -golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs= +golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1792,3 +1792,5 @@ sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= +tailscale.com/client/tailscale/v2 v2.9.0 h1:zBZIIeIYXL42qvvile7d29O2DKSr3AfNc2gzd1JCf2o= +tailscale.com/client/tailscale/v2 v2.9.0/go.mod h1:FGjvGT3ThHelqo0gfdK3IN3k1dwNbRzYbQh2XO3C47U= diff --git a/k8s-operator/reconciler/tailnet/mocks_test.go b/k8s-operator/reconciler/tailnet/mocks_test.go index 434255688..3931e4d33 100644 --- 
a/k8s-operator/reconciler/tailnet/mocks_test.go +++ b/k8s-operator/reconciler/tailnet/mocks_test.go @@ -9,7 +9,9 @@ import ( "context" "io" - "tailscale.com/internal/client/tailscale" + "tailscale.com/client/tailscale/v2" + + "tailscale.com/k8s-operator/tsclient" ) type ( @@ -18,28 +20,62 @@ type ( ErrorOnKeys bool ErrorOnServices bool } + + MockDeviceResource struct { + tsclient.DeviceResource + + Error bool + } + + MockKeyResource struct { + tsclient.KeyResource + + Error bool + } + + MockVIPServiceResource struct { + tsclient.VIPServiceResource + + Error bool + } ) -func (m MockTailnetClient) Devices(_ context.Context, _ *tailscale.DeviceFieldsOpts) ([]*tailscale.Device, error) { - if m.ErrorOnDevices { +func (m MockKeyResource) List(_ context.Context, _ bool) ([]tailscale.Key, error) { + if m.Error { return nil, io.EOF } return nil, nil } -func (m MockTailnetClient) Keys(_ context.Context) ([]string, error) { - if m.ErrorOnKeys { +func (m MockDeviceResource) List(_ context.Context, _ ...tailscale.ListDevicesOptions) ([]tailscale.Device, error) { + if m.Error { return nil, io.EOF } return nil, nil } -func (m MockTailnetClient) ListVIPServices(_ context.Context) (*tailscale.VIPServiceList, error) { - if m.ErrorOnServices { +func (m MockVIPServiceResource) List(_ context.Context) ([]tailscale.VIPService, error) { + if m.Error { return nil, io.EOF } return nil, nil } + +func (m MockTailnetClient) Devices() tsclient.DeviceResource { + return MockDeviceResource{Error: m.ErrorOnDevices} +} + +func (m MockTailnetClient) Keys() tsclient.KeyResource { + return MockKeyResource{Error: m.ErrorOnKeys} +} + +func (m MockTailnetClient) VIPServices() tsclient.VIPServiceResource { + return MockVIPServiceResource{Error: m.ErrorOnServices} +} + +func (m MockTailnetClient) LoginURL() string { + return "" +} diff --git a/k8s-operator/reconciler/tailnet/tailnet.go b/k8s-operator/reconciler/tailnet/tailnet.go index 2e7004b69..bb05eb813 100644 --- 
a/k8s-operator/reconciler/tailnet/tailnet.go +++ b/k8s-operator/reconciler/tailnet/tailnet.go @@ -12,12 +12,11 @@ import ( "context" "errors" "fmt" + "net/url" "sync" "time" "go.uber.org/zap" - "golang.org/x/oauth2" - "golang.org/x/oauth2/clientcredentials" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,12 +25,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale/v2" - "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" operatorutils "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/k8s-operator/reconciler" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/kube/kubetypes" "tailscale.com/tstime" "tailscale.com/util/clientmetric" @@ -47,7 +47,8 @@ type ( tailscaleNamespace string clock tstime.Clock logger *zap.SugaredLogger - clientFunc func(*tsapi.Tailnet, *corev1.Secret) TailscaleClient + clientFunc func(*tsapi.Tailnet, *corev1.Secret) tsclient.Client + registry ClientRegistry // Metrics related fields mu sync.Mutex @@ -68,14 +69,18 @@ type ( Logger *zap.SugaredLogger // ClientFunc is a function that takes tailscale credentials and returns an implementation for the Tailscale // HTTP API. This should generally be nil unless needed for testing. - ClientFunc func(*tsapi.Tailnet, *corev1.Secret) TailscaleClient + ClientFunc func(*tsapi.Tailnet, *corev1.Secret) tsclient.Client + // Registry is used to store and share initialized tailscale clients for use by other reconcilers. + Registry ClientRegistry } - // The TailscaleClient interface describes types that interact with the Tailscale HTTP API. 
- TailscaleClient interface { - Devices(context.Context, *tailscale.DeviceFieldsOpts) ([]*tailscale.Device, error) - Keys(ctx context.Context) ([]string, error) - ListVIPServices(ctx context.Context) (*tailscale.VIPServiceList, error) + // The ClientRegistry interface describes types that can store initialized tailscale clients for use by other + // reconcilers. + ClientRegistry interface { + // Add should store the given tsclient.Client implementation for a specified tailnet. + Add(tailnet string, client tsclient.Client) + // Remove should remove any tsclient.Client implementation for a specified tailnet. + Remove(tailnet string) } ) @@ -90,6 +95,7 @@ func NewReconciler(options ReconcilerOptions) *Reconciler { clock: options.Clock, logger: options.Logger.Named(reconcilerName), clientFunc: options.ClientFunc, + registry: options.Registry, } } @@ -137,6 +143,7 @@ func (r *Reconciler) delete(ctx context.Context, tailnet *tsapi.Tailnet) (reconc r.tailnets.Remove(tailnet.UID) r.mu.Unlock() gaugeTailnetResources.Set(int64(r.tailnets.Len())) + r.registry.Remove(tailnet.Name) return reconcile.Result{}, nil } @@ -193,7 +200,10 @@ func (r *Reconciler) createOrUpdate(ctx context.Context, tailnet *tsapi.Tailnet) return reconcile.Result{RequeueAfter: time.Minute / 2}, nil } - tsClient := r.createClient(ctx, tailnet, &secret) + tsClient, err := r.createClient(tailnet, &secret) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to create tailnet client: %w", err) + } // Second, we ensure the OAuth credentials supplied in the secret are valid and have the required scopes to access // the various API endpoints required by the operator. 
@@ -226,6 +236,8 @@ func (r *Reconciler) createOrUpdate(ctx context.Context, tailnet *tsapi.Tailnet) return reconcile.Result{}, fmt.Errorf("failed to add finalizer to Tailnet %q: %w", tailnet.Name, err) } + r.registry.Add(tailnet.Name, tsClient) + return reconcile.Result{}, nil } @@ -235,9 +247,9 @@ const ( clientSecretKey = "client_secret" ) -func (r *Reconciler) createClient(ctx context.Context, tailnet *tsapi.Tailnet, secret *corev1.Secret) TailscaleClient { +func (r *Reconciler) createClient(tailnet *tsapi.Tailnet, secret *corev1.Secret) (tsclient.Client, error) { if r.clientFunc != nil { - return r.clientFunc(tailnet, secret) + return r.clientFunc(tailnet, secret), nil } baseURL := ipn.DefaultControlURL @@ -245,38 +257,36 @@ func (r *Reconciler) createClient(ctx context.Context, tailnet *tsapi.Tailnet, s baseURL = tailnet.Spec.LoginURL } - credentials := clientcredentials.Config{ - ClientID: string(secret.Data[clientIDKey]), - ClientSecret: string(secret.Data[clientSecretKey]), - TokenURL: baseURL + "/api/v2/oauth/token", + base, err := url.Parse(baseURL) + if err != nil { + return nil, fmt.Errorf("failed to parse base URL %q: %w", baseURL, err) } - source := credentials.TokenSource(ctx) - httpClient := oauth2.NewClient(ctx, source) - - tsClient := tailscale.NewClient("-", nil) - tsClient.UserAgent = "tailscale-k8s-operator" - tsClient.HTTPClient = httpClient - tsClient.BaseURL = baseURL - - return tsClient + return tsclient.Wrap(&tailscale.Client{ + BaseURL: base, + UserAgent: "tailscale-k8s-operator", + Auth: &tailscale.OAuth{ + ClientID: string(secret.Data[clientIDKey]), + ClientSecret: string(secret.Data[clientSecretKey]), + }, + }), nil } -func (r *Reconciler) ensurePermissions(ctx context.Context, tsClient TailscaleClient, tailnet *tsapi.Tailnet) bool { +func (r *Reconciler) ensurePermissions(ctx context.Context, tsClient tsclient.Client, tailnet *tsapi.Tailnet) bool { // Perform basic list requests here to confirm that the OAuth credentials referenced 
on the Tailnet resource // can perform the basic operations required for the operator to function. This has a caveat of only performing // read actions, as we don't want to create arbitrary keys and VIP services. However, it will catch when a user // has completely forgotten an entire scope that's required. var errs error - if _, err := tsClient.Devices(ctx, nil); err != nil { + if _, err := tsClient.Devices().List(ctx); err != nil { errs = errors.Join(errs, fmt.Errorf("failed to list devices: %w", err)) } - if _, err := tsClient.Keys(ctx); err != nil { + if _, err := tsClient.Keys().List(ctx, false); err != nil { errs = errors.Join(errs, fmt.Errorf("failed to list auth keys: %w", err)) } - if _, err := tsClient.ListVIPServices(ctx); err != nil { + if _, err := tsClient.VIPServices().List(ctx); err != nil { errs = errors.Join(errs, fmt.Errorf("failed to list tailscale services: %w", err)) } diff --git a/k8s-operator/reconciler/tailnet/tailnet_test.go b/k8s-operator/reconciler/tailnet/tailnet_test.go index 0ed2ca598..0e6e2f94c 100644 --- a/k8s-operator/reconciler/tailnet/tailnet_test.go +++ b/k8s-operator/reconciler/tailnet/tailnet_test.go @@ -18,6 +18,7 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/k8s-operator/reconciler/tailnet" + "tailscale.com/k8s-operator/tsclient" "tailscale.com/tstest" ) @@ -36,7 +37,7 @@ func TestReconciler_Reconcile(t *testing.T) { Secret *corev1.Secret ExpectsError bool ExpectedConditions []metav1.Condition - ClientFunc func(*tsapi.Tailnet, *corev1.Secret) tailnet.TailscaleClient + ClientFunc func(*tsapi.Tailnet, *corev1.Secret) tsclient.Client }{ { Name: "ignores unknown tailnet requests", @@ -201,7 +202,7 @@ func TestReconciler_Reconcile(t *testing.T) { "client_secret": []byte("test"), }, }, - ClientFunc: func(_ *tsapi.Tailnet, _ *corev1.Secret) tailnet.TailscaleClient { + ClientFunc: func(_ *tsapi.Tailnet, _ *corev1.Secret) tsclient.Client { return &MockTailnetClient{ErrorOnDevices: true} }, 
ExpectedConditions: []metav1.Condition{ @@ -240,7 +241,7 @@ func TestReconciler_Reconcile(t *testing.T) { "client_secret": []byte("test"), }, }, - ClientFunc: func(_ *tsapi.Tailnet, _ *corev1.Secret) tailnet.TailscaleClient { + ClientFunc: func(_ *tsapi.Tailnet, _ *corev1.Secret) tsclient.Client { return &MockTailnetClient{ErrorOnServices: true} }, ExpectedConditions: []metav1.Condition{ @@ -279,7 +280,7 @@ func TestReconciler_Reconcile(t *testing.T) { "client_secret": []byte("test"), }, }, - ClientFunc: func(_ *tsapi.Tailnet, _ *corev1.Secret) tailnet.TailscaleClient { + ClientFunc: func(_ *tsapi.Tailnet, _ *corev1.Secret) tsclient.Client { return &MockTailnetClient{ErrorOnKeys: true} }, ExpectedConditions: []metav1.Condition{ @@ -318,7 +319,7 @@ func TestReconciler_Reconcile(t *testing.T) { "client_secret": []byte("test"), }, }, - ClientFunc: func(_ *tsapi.Tailnet, _ *corev1.Secret) tailnet.TailscaleClient { + ClientFunc: func(_ *tsapi.Tailnet, _ *corev1.Secret) tsclient.Client { return &MockTailnetClient{} }, ExpectedConditions: []metav1.Condition{ @@ -349,6 +350,7 @@ func TestReconciler_Reconcile(t *testing.T) { Logger: logger.Sugar(), ClientFunc: tc.ClientFunc, TailscaleNamespace: "tailscale", + Registry: tsclient.NewProvider(nil), } reconciler := tailnet.NewReconciler(opts) diff --git a/k8s-operator/tsclient/client.go b/k8s-operator/tsclient/client.go new file mode 100644 index 000000000..42a448c53 --- /dev/null +++ b/k8s-operator/tsclient/client.go @@ -0,0 +1,70 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package tsclient provides a mockable wrapper around the tailscale-client-go-v2 package for use by the Kubernetes +// operator. It also contains the Provider type used to manage multiple instances of tailscale clients for different +// tailnets. 
+package tsclient + +import ( + "context" + + "tailscale.com/client/tailscale/v2" +) + +type ( + // The Client interface describes types that interact with the Tailscale API. + Client interface { + // LoginURL should return the URL of the Tailscale control plane. + LoginURL() string + // Devices should return a DeviceResource implementation used to interact with the devices API. + Devices() DeviceResource + // Keys should return a KeyResource implementation used to interact with the keys API. + Keys() KeyResource + // VIPServices should return a VIPServiceResource implementation used to interact with the VIP services API. + VIPServices() VIPServiceResource + } + + DeviceResource interface { + Delete(context.Context, string) error + List(context.Context, ...tailscale.ListDevicesOptions) ([]tailscale.Device, error) + Get(context.Context, string) (*tailscale.Device, error) + } + + KeyResource interface { + CreateAuthKey(ctx context.Context, ckr tailscale.CreateKeyRequest) (*tailscale.Key, error) + List(ctx context.Context, all bool) ([]tailscale.Key, error) + } + + VIPServiceResource interface { + List(ctx context.Context) ([]tailscale.VIPService, error) + Delete(ctx context.Context, name string) error + Get(ctx context.Context, name string) (*tailscale.VIPService, error) + CreateOrUpdate(ctx context.Context, svc tailscale.VIPService) error + } + + clientWrapper struct { + loginURL string + client *tailscale.Client + } +) + +func Wrap(client *tailscale.Client) Client { + return &clientWrapper{client: client, loginURL: client.BaseURL.String()} +} + +func (c *clientWrapper) Devices() DeviceResource { + return c.client.Devices() +} + +func (c *clientWrapper) Keys() KeyResource { + return c.client.Keys() +} + +func (c *clientWrapper) VIPServices() VIPServiceResource { + return c.client.VIPServices() +} + +func (c *clientWrapper) LoginURL() string { + return c.loginURL +} diff --git a/k8s-operator/tsclient/provider.go b/k8s-operator/tsclient/provider.go new file mode
100644 index 000000000..ad4550dec --- /dev/null +++ b/k8s-operator/tsclient/provider.go @@ -0,0 +1,67 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package tsclient + +import ( + "errors" + "fmt" + "sync" +) + +type ( + // The Provider type is used to manage multiple Client implementations for different tailnets. + Provider struct { + defaultClient Client + mu sync.RWMutex + clients map[string]Client + } +) + +var ( + // ErrClientNotFound is the error given when calling Provider.For with a tailnet that has not yet been registered + // with the provider. + ErrClientNotFound = errors.New("client not found") +) + +// NewProvider returns a new instance of the Provider type that uses the given Client implementation as the default +// client. This client will be given when calling Provider.For with a blank tailnet name. +func NewProvider(defaultClient Client) *Provider { + return &Provider{ + defaultClient: defaultClient, + clients: make(map[string]Client), + } +} + +// Add a Client implementation for a given tailnet. +func (p *Provider) Add(tailnet string, client Client) { + p.mu.Lock() + defer p.mu.Unlock() + + p.clients[tailnet] = client +} + +// Remove the Client implementation associated with the given tailnet. +func (p *Provider) Remove(tailnet string) { + p.mu.Lock() + defer p.mu.Unlock() + + delete(p.clients, tailnet) +} + +// For returns a Client implementation associated with the given tailnet. Returns ErrClientNotFound if the given +// tailnet does not exist. Use a blank tailnet name to obtain the default Client. 
+func (p *Provider) For(tailnet string) (Client, error) { + if tailnet == "" { + return p.defaultClient, nil + } + + p.mu.RLock() + defer p.mu.RUnlock() + + if client, ok := p.clients[tailnet]; ok { + return client, nil + } + + return nil, fmt.Errorf("%w: %s", ErrClientNotFound, tailnet) +} diff --git a/shell.nix b/shell.nix index 07d6eb933..1be1e768a 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-39axT5Q0+fNTcMgZCMLMNfJEJN46wMaaKDgfI+Uj+Ps= +# nix-direnv cache busting line: sha256-d4rAAWD67rt6raRsJLmZij2e2Tiib8OlHNIlMktcEAU=