appc,feature/conn25,net: Add DNS response interception for conn25

The new version of app connector (conn25) needs to read DNS responses
for domains it is interested in and store and swap out IP addresses.

Add a hook to dns manager to enable this.
Give the conn25 updated netmaps so that it knows when to assign
connecting addresses and from what pool.
Assign an address when we see a DNS response for a domain we are
interested in, but don't do anything with the address yet.

Updates tailscale/corp#34252

Signed-off-by: Fran Bull <fran@tailscale.com>
This commit is contained in:
Fran Bull 2026-01-28 14:07:08 -08:00
parent 6cbfc2f3ba
commit b58aa61dba
11 changed files with 1049 additions and 292 deletions

View File

@ -5,9 +5,7 @@ package appc
import (
"cmp"
"net/netip"
"slices"
"sync"
"tailscale.com/tailcfg"
"tailscale.com/types/appctype"
@ -15,105 +13,6 @@ import (
"tailscale.com/util/set"
)
// Conn25 holds the developing state for the as yet nascent next generation app connector.
// There is currently (2025-12-08) no actual app connecting functionality.
type Conn25 struct {
	mu sync.Mutex // guards transitIPs
	// transitIPs maps a peer's NodeID to that peer's TransitIP->DestinationIP mappings.
	transitIPs map[tailcfg.NodeID]map[netip.Addr]netip.Addr
}

// dupeTransitIPMessage is the error message returned for each repeated
// transit address within a single ConnectorTransitIPRequest.
const dupeTransitIPMessage = "Duplicate transit address in ConnectorTransitIPRequest"
// HandleConnectorTransitIPRequest creates a ConnectorTransitIPResponse in response to a ConnectorTransitIPRequest.
// It updates the connector's mapping of TransitIP->DestinationIP per peer (tailcfg.NodeID).
// If a peer has stored this mapping in the connector Conn25 will route traffic to TransitIPs to DestinationIPs for that peer.
func (c *Conn25) HandleConnectorTransitIPRequest(nid tailcfg.NodeID, ctipr ConnectorTransitIPRequest) ConnectorTransitIPResponse {
	var resp ConnectorTransitIPResponse
	seen := make(map[netip.Addr]bool, len(ctipr.TransitIPs))
	for _, req := range ctipr.TransitIPs {
		// A transit addr may appear at most once per request; later
		// occurrences are rejected without touching the stored mappings.
		if seen[req.TransitIP] {
			resp.TransitIPs = append(resp.TransitIPs, TransitIPResponse{
				Code:    OtherFailure,
				Message: dupeTransitIPMessage,
			})
			continue
		}
		seen[req.TransitIP] = true
		resp.TransitIPs = append(resp.TransitIPs, c.handleTransitIPRequest(nid, req))
	}
	return resp
}
// handleTransitIPRequest records a single TransitIP->DestinationIP mapping
// for peer nid, overwriting any existing mapping for that transit addr.
func (c *Conn25) handleTransitIPRequest(nid tailcfg.NodeID, tipr TransitIPRequest) TransitIPResponse {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.transitIPs == nil {
		c.transitIPs = map[tailcfg.NodeID]map[netip.Addr]netip.Addr{}
	}
	if c.transitIPs[nid] == nil {
		c.transitIPs[nid] = map[netip.Addr]netip.Addr{}
	}
	c.transitIPs[nid][tipr.TransitIP] = tipr.DestinationIP
	return TransitIPResponse{}
}
// transitIPTarget returns the destination addr stored for peer nid's transit
// addr tip, or the zero netip.Addr when no such mapping exists (nil map
// lookups yield the zero value).
func (c *Conn25) transitIPTarget(nid tailcfg.NodeID, tip netip.Addr) netip.Addr {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.transitIPs[nid][tip]
}
// TransitIPRequest details a single TransitIP allocation request from a client to a
// connector.
type TransitIPRequest struct {
	// TransitIP is the intermediate destination IP that will be received at this
	// connector and will be replaced by DestinationIP when performing DNAT.
	TransitIP netip.Addr `json:"transitIP,omitzero"`
	// DestinationIP is the final destination IP that connections to the TransitIP
	// should be mapped to when performing DNAT.
	DestinationIP netip.Addr `json:"destinationIP,omitzero"`
}

// ConnectorTransitIPRequest is the request body for a PeerAPI request to
// /connector/transit-ip and can include zero or more TransitIP allocation requests.
type ConnectorTransitIPRequest struct {
	// TransitIPs is the list of requested mappings.
	TransitIPs []TransitIPRequest `json:"transitIPs,omitempty"`
}

// TransitIPResponseCode appears in TransitIPResponse and signifies success or failure status.
type TransitIPResponseCode int

const (
	// OK indicates that the mapping was created as requested.
	OK TransitIPResponseCode = 0
	// OtherFailure indicates that the mapping failed for a reason that does not have
	// another relevant [TransitIPResponseCode].
	OtherFailure TransitIPResponseCode = 1
)

// TransitIPResponse is the response to a TransitIPRequest.
type TransitIPResponse struct {
	// Code is an error code indicating success or failure of the [TransitIPRequest].
	Code TransitIPResponseCode `json:"code,omitzero"`
	// Message is an error message explaining what happened, suitable for logging but
	// not necessarily suitable for displaying in a UI to non-technical users. It
	// should be empty when [Code] is [OK].
	Message string `json:"message,omitzero"`
}

// ConnectorTransitIPResponse is the response to a ConnectorTransitIPRequest.
type ConnectorTransitIPResponse struct {
	// TransitIPs is the list of outcomes for each requested mapping. Elements
	// correspond to the order of [ConnectorTransitIPRequest.TransitIPs].
	TransitIPs []TransitIPResponse `json:"transitIPs,omitempty"`
}

const AppConnectorsExperimentalAttrName = "tailscale.com/app-connectors-experimental"
const AppConnectorsExperimentalAttrName = "tailscale.com/app-connectors-experimental"
// PickSplitDNSPeers looks at the netmap peers capabilities and finds which peers

View File

@ -5,7 +5,6 @@ package appc
import (
"encoding/json"
"net/netip"
"reflect"
"testing"
@ -14,183 +13,6 @@ import (
"tailscale.com/types/opt"
)
// TestHandleConnectorTransitIPRequestZeroLength tests that if sent a
// ConnectorTransitIPRequest with 0 TransitIPRequests, we respond with a
// ConnectorTransitIPResponse with 0 TransitIPResponses.
func TestHandleConnectorTransitIPRequestZeroLength(t *testing.T) {
	c := &Conn25{}
	resp := c.HandleConnectorTransitIPRequest(tailcfg.NodeID(1), ConnectorTransitIPRequest{})
	if n := len(resp.TransitIPs); n != 0 {
		t.Fatalf("n TransitIPs in response: %d, want 0", n)
	}
}
// TestHandleConnectorTransitIPRequestStoresAddr tests that if sent a
// request with a transit addr and a destination addr we store that mapping
// and can retrieve it. If sent another req with a different dst for that transit addr
// we store that instead.
func TestHandleConnectorTransitIPRequestStoresAddr(t *testing.T) {
	c := &Conn25{}
	nid := tailcfg.NodeID(1)
	tip := netip.MustParseAddr("0.0.0.1")
	dip := netip.MustParseAddr("1.2.3.4")
	dip2 := netip.MustParseAddr("1.2.3.5")
	// mr builds a one-element request mapping transit addr t to destination d.
	mr := func(t, d netip.Addr) ConnectorTransitIPRequest {
		return ConnectorTransitIPRequest{
			TransitIPs: []TransitIPRequest{
				{TransitIP: t, DestinationIP: d},
			},
		}
	}
	resp := c.HandleConnectorTransitIPRequest(nid, mr(tip, dip))
	if len(resp.TransitIPs) != 1 {
		t.Fatalf("n TransitIPs in response: %d, want 1", len(resp.TransitIPs))
	}
	got := resp.TransitIPs[0].Code
	if got != TransitIPResponseCode(0) {
		t.Fatalf("TransitIP Code: %d, want 0", got)
	}
	gotAddr := c.transitIPTarget(nid, tip)
	if gotAddr != dip {
		t.Fatalf("Connector stored destination for tip: %v, want %v", gotAddr, dip)
	}
	// mapping can be overwritten
	resp2 := c.HandleConnectorTransitIPRequest(nid, mr(tip, dip2))
	if len(resp2.TransitIPs) != 1 {
		t.Fatalf("n TransitIPs in response: %d, want 1", len(resp2.TransitIPs))
	}
	// BUG FIX: previously inspected resp (the first response) instead of resp2,
	// so the second request's code was never actually checked.
	got2 := resp2.TransitIPs[0].Code
	if got2 != TransitIPResponseCode(0) {
		t.Fatalf("TransitIP Code: %d, want 0", got2)
	}
	gotAddr2 := c.transitIPTarget(nid, tip)
	if gotAddr2 != dip2 {
		// BUG FIX: previously printed the stale gotAddr instead of gotAddr2.
		t.Fatalf("Connector stored destination for tip: %v, want %v", gotAddr2, dip2)
	}
}
// TestHandleConnectorTransitIPRequestMultipleTIP tests that we can
// get a req with multiple mappings and we store them all. Including
// multiple transit addrs for the same destination.
func TestHandleConnectorTransitIPRequestMultipleTIP(t *testing.T) {
	c := &Conn25{}
	nid := tailcfg.NodeID(1)
	tip := netip.MustParseAddr("0.0.0.1")
	tip2 := netip.MustParseAddr("0.0.0.2")
	tip3 := netip.MustParseAddr("0.0.0.3")
	dip := netip.MustParseAddr("1.2.3.4")
	dip2 := netip.MustParseAddr("1.2.3.5")
	req := ConnectorTransitIPRequest{
		TransitIPs: []TransitIPRequest{
			{TransitIP: tip, DestinationIP: dip},
			{TransitIP: tip2, DestinationIP: dip2},
			// can store same dst addr for multiple transit addrs
			{TransitIP: tip3, DestinationIP: dip},
		},
	}
	resp := c.HandleConnectorTransitIPRequest(nid, req)
	if len(resp.TransitIPs) != 3 {
		t.Fatalf("n TransitIPs in response: %d, want 3", len(resp.TransitIPs))
	}
	// Every per-mapping response should report success.
	for i, tr := range resp.TransitIPs {
		if got := tr.Code; got != TransitIPResponseCode(0) {
			t.Fatalf("i=%d TransitIP Code: %d, want 0", i, got)
		}
	}
	// Every requested mapping should now be retrievable.
	for _, want := range req.TransitIPs {
		if got := c.transitIPTarget(nid, want.TransitIP); got != want.DestinationIP {
			t.Fatalf("Connector stored destination for tip(%v): %v, want %v", want.TransitIP, got, want.DestinationIP)
		}
	}
}
// TestHandleConnectorTransitIPRequestSameTIP tests that if we get
// a req that has more than one TransitIPRequest for the same transit addr
// only the first is stored, and the subsequent ones get an error code and
// message in the response.
func TestHandleConnectorTransitIPRequestSameTIP(t *testing.T) {
	c := &Conn25{}
	nid := tailcfg.NodeID(1)
	tip := netip.MustParseAddr("0.0.0.1")
	tip2 := netip.MustParseAddr("0.0.0.2")
	dip := netip.MustParseAddr("1.2.3.4")
	dip2 := netip.MustParseAddr("1.2.3.5")
	dip3 := netip.MustParseAddr("1.2.3.6")
	req := ConnectorTransitIPRequest{
		TransitIPs: []TransitIPRequest{
			{TransitIP: tip, DestinationIP: dip},
			// cannot have dupe TransitIPs in one ConnectorTransitIPRequest
			{TransitIP: tip, DestinationIP: dip2},
			{TransitIP: tip2, DestinationIP: dip3},
		},
	}
	resp := c.HandleConnectorTransitIPRequest(nid, req)
	if len(resp.TransitIPs) != 3 {
		t.Fatalf("n TransitIPs in response: %d, want 3", len(resp.TransitIPs))
	}
	got := resp.TransitIPs[0].Code
	if got != TransitIPResponseCode(0) {
		t.Fatalf("i=0 TransitIP Code: %d, want 0", got)
	}
	msg := resp.TransitIPs[0].Message
	if msg != "" {
		t.Fatalf("i=0 TransitIP Message: \"%s\", want \"%s\"", msg, "")
	}
	got1 := resp.TransitIPs[1].Code
	if got1 != TransitIPResponseCode(1) {
		t.Fatalf("i=1 TransitIP Code: %d, want 1", got1)
	}
	msg1 := resp.TransitIPs[1].Message
	if msg1 != dupeTransitIPMessage {
		t.Fatalf("i=1 TransitIP Message: \"%s\", want \"%s\"", msg1, dupeTransitIPMessage)
	}
	got2 := resp.TransitIPs[2].Code
	if got2 != TransitIPResponseCode(0) {
		t.Fatalf("i=2 TransitIP Code: %d, want 0", got2)
	}
	msg2 := resp.TransitIPs[2].Message
	if msg2 != "" {
		// BUG FIX: previously printed msg (element 0's message) instead of msg2.
		t.Fatalf("i=2 TransitIP Message: \"%s\", want \"%s\"", msg2, "")
	}
	gotAddr1 := c.transitIPTarget(nid, tip)
	if gotAddr1 != dip {
		t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip, gotAddr1, dip)
	}
	gotAddr2 := c.transitIPTarget(nid, tip2)
	if gotAddr2 != dip3 {
		t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip2, gotAddr2, dip3)
	}
}
// TestTransitIPTargetUnknownTIP tests that unknown transit addresses can be
// looked up without problem, yielding the zero netip.Addr.
func TestTransitIPTargetUnknownTIP(t *testing.T) {
	c := &Conn25{}
	nid := tailcfg.NodeID(1)
	tip := netip.MustParseAddr("0.0.0.1")
	got := c.transitIPTarget(nid, tip)
	want := netip.Addr{}
	if got != want {
		t.Fatalf("Unknown transit addr, want: %v, got %v", want, got)
	}
}
func TestPickSplitDNSPeers(t *testing.T) {
getBytesForAttr := func(name string, domains []string, tags []string) []byte {
attr := appctype.AppConnectorAttr{

View File

@ -45,7 +45,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
github.com/tailscale/setec/types/api from github.com/tailscale/setec/client/setec
github.com/x448/float16 from github.com/fxamacker/cbor/v2
💣 go4.org/mem from tailscale.com/client/local+
go4.org/netipx from tailscale.com/net/tsaddr
go4.org/netipx from tailscale.com/net/tsaddr+
W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/netmon+
google.golang.org/protobuf/encoding/protodelim from github.com/prometheus/common/expfmt
google.golang.org/protobuf/encoding/prototext from github.com/prometheus/common/expfmt+

View File

@ -149,7 +149,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
github.com/x448/float16 from github.com/fxamacker/cbor/v2
go.yaml.in/yaml/v2 from sigs.k8s.io/yaml
💣 go4.org/mem from tailscale.com/client/local+
go4.org/netipx from tailscale.com/net/tsaddr
go4.org/netipx from tailscale.com/net/tsaddr+
W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/netmon+
k8s.io/client-go/util/homedir from tailscale.com/cmd/tailscale/cli
sigs.k8s.io/yaml from tailscale.com/cmd/tailscale/cli

View File

@ -249,7 +249,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+
gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+
tailscale.com from tailscale.com/version
tailscale.com/appc from tailscale.com/ipn/ipnlocal+
tailscale.com/appc from tailscale.com/ipn/ipnlocal
💣 tailscale.com/atomicfile from tailscale.com/ipn+
LD tailscale.com/chirp from tailscale.com/cmd/tailscaled
tailscale.com/client/local from tailscale.com/client/web+

View File

@ -9,13 +9,27 @@ package conn25
import (
"encoding/json"
"errors"
"maps"
"net/http"
"net/netip"
"reflect"
"slices"
"sync"
"tailscale.com/appc"
"go4.org/netipx"
"golang.org/x/net/dns/dnsmessage"
"tailscale.com/feature"
"tailscale.com/ipn/ipnext"
"tailscale.com/ipn/ipnlocal"
"tailscale.com/net/dns"
"tailscale.com/tailcfg"
"tailscale.com/types/appctype"
"tailscale.com/types/logger"
"tailscale.com/types/views"
"tailscale.com/util/dnsname"
"tailscale.com/util/mak"
"tailscale.com/util/set"
)
// featureName is the name of the feature implemented by this package.
@ -26,7 +40,8 @@ func init() {
feature.Register(featureName)
newExtension := func(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) {
e := &extension{
conn: &appc.Conn25{},
conn: newConn25(logger.WithPrefix(logf, "conn25: ")),
backend: sb,
}
return e, nil
}
@ -46,7 +61,11 @@ func handleConnectorTransitIP(h ipnlocal.PeerAPIHandler, w http.ResponseWriter,
// extension is an [ipnext.Extension] managing the connector on platforms
// that import this package.
type extension struct {
	conn    *Conn25            // safe for concurrent access and only set at creation
	backend ipnext.SafeBackend // safe for concurrent access and only set at creation

	mu sync.Mutex // protects the fields below
	// isDNSHookRegistered records whether conn.mapDNSResponse is currently
	// installed as the DNS manager's query response mapper.
	isDNSHookRegistered bool
}
// Name implements [ipnext.Extension].
@ -56,6 +75,7 @@ func (e *extension) Name() string {
// Init implements [ipnext.Extension].
func (e *extension) Init(host ipnext.Host) error {
	// Watch self-node changes so the connector can reconfigure and
	// (un)register its DNS hook as its configuration appears or disappears.
	host.Hooks().OnSelfChange.Add(e.onSelfChange)
	return nil
}
@ -71,13 +91,13 @@ func (e *extension) handleConnectorTransitIP(h ipnlocal.PeerAPIHandler, w http.R
http.Error(w, "Method should be POST", http.StatusMethodNotAllowed)
return
}
var req appc.ConnectorTransitIPRequest
var req ConnectorTransitIPRequest
err := json.NewDecoder(http.MaxBytesReader(w, r.Body, maxBodyBytes+1)).Decode(&req)
if err != nil {
http.Error(w, "Error decoding JSON", http.StatusBadRequest)
return
}
resp := e.conn.HandleConnectorTransitIPRequest(h.Peer().ID(), req)
resp := e.conn.handleConnectorTransitIPRequest(h.Peer().ID(), req)
bs, err := json.Marshal(resp)
if err != nil {
http.Error(w, "Error encoding JSON", http.StatusInternalServerError)
@ -85,3 +105,439 @@ func (e *extension) handleConnectorTransitIP(h ipnlocal.PeerAPIHandler, w http.R
}
w.Write(bs)
}
// onSelfChange reconfigures the connector from the new self node view and
// then registers or unregisters the DNS response hook depending on whether
// the connector ended up configured.
func (e *extension) onSelfChange(selfNode tailcfg.NodeView) {
	err := e.conn.reconfig(selfNode)
	if err != nil {
		// BUG FIX: logf is printf-style; the original passed err with no
		// format verb, producing "...%!(EXTRA ...)" output (go vet printf).
		e.conn.client.logf("error during Reconfig onSelfChange: %v", err)
		return
	}
	if e.conn.isConfigured() {
		err = e.registerDNSHook()
	} else {
		err = e.unregisterDNSHook()
	}
	if err != nil {
		// BUG FIX: same missing format verb as above.
		e.conn.client.logf("error managing DNS hook onSelfChange: %v", err)
	}
}
// registerDNSHook installs the connector's DNS response mapper on the DNS
// manager, if it is not already installed.
func (e *extension) registerDNSHook() error {
	e.mu.Lock()
	defer e.mu.Unlock()
	if e.isDNSHookRegistered {
		return nil
	}
	if err := e.setDNSHookLocked(e.conn.mapDNSResponse); err != nil {
		return err
	}
	e.isDNSHookRegistered = true
	return nil
}
// unregisterDNSHook removes the connector's DNS response mapper from the DNS
// manager, if one is installed.
func (e *extension) unregisterDNSHook() error {
	e.mu.Lock()
	defer e.mu.Unlock()
	if !e.isDNSHookRegistered {
		return nil
	}
	if err := e.setDNSHookLocked(nil); err != nil {
		return err
	}
	e.isDNSHookRegistered = false
	return nil
}
// setDNSHookLocked installs fx (or clears the hook, when fx is nil) as the
// DNS manager's query response mapper. e.mu must be held.
func (e *extension) setDNSHookLocked(fx dns.ResponseMapper) error {
	dnsManager, ok := e.backend.Sys().DNSManager.GetOK()
	if !ok || dnsManager == nil {
		// Idiom fix: Go error strings are lowercase and unpunctuated
		// (they are usually wrapped into larger messages).
		return errors.New("couldn't get DNSManager from sys")
	}
	dnsManager.SetQueryResponseMapper(fx)
	return nil
}
// appAddr pairs an address with the name of the connector app it belongs to.
type appAddr struct {
	app  string     // connector application name from the tailnet configuration
	addr netip.Addr // the address associated with that app
}
// Conn25 holds state for routing traffic for a domain via a connector.
type Conn25 struct {
	client *client // connector-client half: address pools and DNS interception
	server *server // connector-server half: per-peer transit IP mappings
}
// isConfigured reports whether the client half has consumed a connector
// configuration (see client.isConfigured).
func (c *Conn25) isConfigured() bool {
	return c.client.isConfigured()
}
// newConn25 returns a Conn25 whose client and server halves both log via logf.
func newConn25(logf logger.Logf) *Conn25 {
	return &Conn25{
		client: &client{logf: logf},
		server: &server{logf: logf},
	}
}
// ipSetFromIPRanges builds an IPSet containing every range in rs.
func ipSetFromIPRanges(rs []netipx.IPRange) (*netipx.IPSet, error) {
	var b netipx.IPSetBuilder
	for _, r := range rs {
		b.AddRange(r)
	}
	return b.IPSet()
}
// reconfig updates both halves of the connector from selfNode, stopping at
// the first error.
func (c *Conn25) reconfig(selfNode tailcfg.NodeView) error {
	if err := c.client.reconfig(selfNode); err != nil {
		return err
	}
	return c.server.reconfig(selfNode)
}
// mapDNSResponse parses and inspects the DNS response, and uses the
// contents to assign addresses for connecting. It does not yet modify
// the response.
//
// buf is a wire-format DNS response; every current code path in the client
// returns buf itself, unmodified.
func (c *Conn25) mapDNSResponse(buf []byte) []byte {
	return c.client.mapDNSResponse(buf)
}
// dupeTransitIPMessage is the error message returned for each repeated
// transit address within a single ConnectorTransitIPRequest.
const dupeTransitIPMessage = "Duplicate transit address in ConnectorTransitIPRequest"

// handleConnectorTransitIPRequest creates a ConnectorTransitIPResponse in response to a ConnectorTransitIPRequest.
// It updates the connector's mapping of TransitIP->DestinationIP per peer (tailcfg.NodeID).
// If a peer has stored this mapping in the connector Conn25 will route traffic to TransitIPs to DestinationIPs for that peer.
func (c *Conn25) handleConnectorTransitIPRequest(nid tailcfg.NodeID, ctipr ConnectorTransitIPRequest) ConnectorTransitIPResponse {
	var resp ConnectorTransitIPResponse
	seen := make(map[netip.Addr]bool, len(ctipr.TransitIPs))
	for _, req := range ctipr.TransitIPs {
		// A transit addr may appear at most once per request; later
		// occurrences are rejected without touching the stored mappings.
		if seen[req.TransitIP] {
			resp.TransitIPs = append(resp.TransitIPs, TransitIPResponse{
				Code:    OtherFailure,
				Message: dupeTransitIPMessage,
			})
			continue
		}
		seen[req.TransitIP] = true
		resp.TransitIPs = append(resp.TransitIPs, c.server.handleTransitIPRequest(nid, req))
	}
	return resp
}
// handleTransitIPRequest records, for peer nid, that traffic arriving at
// tipr.TransitIP should be sent to tipr.DestinationIP on behalf of app
// tipr.App, overwriting any existing mapping for that transit addr.
func (s *server) handleTransitIPRequest(nid tailcfg.NodeID, tipr TransitIPRequest) TransitIPResponse {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.transitIPs == nil {
		s.transitIPs = map[tailcfg.NodeID]map[netip.Addr]appAddr{}
	}
	if s.transitIPs[nid] == nil {
		s.transitIPs[nid] = map[netip.Addr]appAddr{}
	}
	s.transitIPs[nid][tipr.TransitIP] = appAddr{app: tipr.App, addr: tipr.DestinationIP}
	return TransitIPResponse{}
}
// transitIPTarget returns the destination addr stored for peer nid's transit
// addr tip, or the zero netip.Addr when no such mapping exists (nil map
// lookups yield the zero appAddr).
func (s *server) transitIPTarget(nid tailcfg.NodeID, tip netip.Addr) netip.Addr {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.transitIPs[nid][tip].addr
}
// TransitIPRequest details a single TransitIP allocation request from a client to a
// connector.
type TransitIPRequest struct {
	// TransitIP is the intermediate destination IP that will be received at this
	// connector and will be replaced by DestinationIP when performing DNAT.
	TransitIP netip.Addr `json:"transitIP,omitzero"`
	// DestinationIP is the final destination IP that connections to the TransitIP
	// should be mapped to when performing DNAT.
	DestinationIP netip.Addr `json:"destinationIP,omitzero"`
	// App is the name of the connector application from the tailnet
	// configuration.
	App string `json:"app,omitzero"`
}

// ConnectorTransitIPRequest is the request body for a PeerAPI request to
// /connector/transit-ip and can include zero or more TransitIP allocation requests.
type ConnectorTransitIPRequest struct {
	// TransitIPs is the list of requested mappings.
	TransitIPs []TransitIPRequest `json:"transitIPs,omitempty"`
}

// TransitIPResponseCode appears in TransitIPResponse and signifies success or failure status.
type TransitIPResponseCode int

const (
	// OK indicates that the mapping was created as requested.
	OK TransitIPResponseCode = 0
	// OtherFailure indicates that the mapping failed for a reason that does not have
	// another relevant [TransitIPResponseCode].
	OtherFailure TransitIPResponseCode = 1
)

// TransitIPResponse is the response to a TransitIPRequest.
type TransitIPResponse struct {
	// Code is an error code indicating success or failure of the [TransitIPRequest].
	Code TransitIPResponseCode `json:"code,omitzero"`
	// Message is an error message explaining what happened, suitable for logging but
	// not necessarily suitable for displaying in a UI to non-technical users. It
	// should be empty when [Code] is [OK].
	Message string `json:"message,omitzero"`
}

// ConnectorTransitIPResponse is the response to a ConnectorTransitIPRequest.
type ConnectorTransitIPResponse struct {
	// TransitIPs is the list of outcomes for each requested mapping. Elements
	// correspond to the order of [ConnectorTransitIPRequest.TransitIPs].
	TransitIPs []TransitIPResponse `json:"transitIPs,omitempty"`
}
// AppConnectorsExperimentalAttrName is the node capability under which the
// control plane delivers experimental app connector configuration.
const AppConnectorsExperimentalAttrName = "tailscale.com/app-connectors-experimental"

// configSrc is the parts of the selfNode NodeView that affect
// the state of config. Used to check if a reconfig with a selfNode
// will change the state of config, to avoid unnecessary reconfigs.
type configSrc struct {
	apps []tailcfg.RawMessage // raw app-connector capability values from the self node
	tags []string             // the self node's tags
}
// matches reports whether cs and other are deeply equal, i.e. whether a
// reconfig from other would derive the same config that cs did.
func (cs *configSrc) matches(other *configSrc) bool {
	return reflect.DeepEqual(cs, other)
}
// configSrcFromNodeView extracts from n the pieces of the self node that
// drive connector configuration.
func configSrcFromNodeView(n tailcfg.NodeView) *configSrc {
	src := &configSrc{
		apps: n.CapMap().Get(AppConnectorsExperimentalAttrName).AsSlice(),
		tags: n.Tags().AsSlice(),
	}
	return src
}
// config holds the config from the policy and lookups derived from that.
// config is not safe for concurrent use.
type config struct {
	src  *configSrc            // the inputs most recently consumed by reconfigFromCfgSrc; nil before any reconfig
	apps []appctype.Conn25Attr // decoded connector app configuration
	// appsByDomain maps an FQDN (with trailing dot) to the names of the apps
	// configured to route that domain.
	appsByDomain map[string][]string
	// selfRoutedDomains is the set of domains (FQDNs with trailing dot) this
	// node itself routes as a connector.
	selfRoutedDomains set.Set[string]
}
// reconfig rebuilds the config from selfNode, reporting whether anything
// changed. It is a no-op when selfNode would produce the same config as the
// one already held.
func (c *config) reconfig(selfNode tailcfg.NodeView) (bool, error) {
	src := configSrcFromNodeView(selfNode)
	if src.matches(c.src) {
		return false, nil
	}
	return c.reconfigFromCfgSrc(src)
}
// reconfigFromCfgSrc rebuilds the derived lookups (apps, appsByDomain,
// selfRoutedDomains) from src. The bool result is always true here; the
// "nothing changed" false case is handled by reconfig before calling this.
func (c *config) reconfigFromCfgSrc(src *configSrc) (bool, error) {
	// Re-wrap the raw capability messages so the standard node-cap
	// unmarshaller can decode them into Conn25Attr values.
	msv := views.MapSliceOf(map[tailcfg.NodeCapability][]tailcfg.RawMessage{
		AppConnectorsExperimentalAttrName: src.apps,
	})
	apps, err := tailcfg.UnmarshalNodeCapViewJSON[appctype.Conn25Attr](msv, AppConnectorsExperimentalAttrName)
	if err != nil {
		return true, err
	}
	selfTags := set.SetOf(src.tags)
	c.src = src
	c.apps = apps
	c.appsByDomain = map[string][]string{}
	c.selfRoutedDomains = set.Set[string]{}
	for _, app := range apps {
		// Does this node carry one of the tags that make it a connector for app?
		selfMatchesTags := false
		for _, tag := range app.Connectors {
			if selfTags.Contains(tag) {
				selfMatchesTags = true
				break
			}
		}
		for _, d := range app.Domains {
			fqdn, err := dnsname.ToFQDN(d)
			if err != nil {
				// NOTE(review): c.src was already replaced above, so an error
				// here leaves partially-built lookups, and a later reconfig
				// with the same source will short-circuit via matches() —
				// confirm this is intended.
				return true, err
			}
			// Keys carry a trailing dot, matching DNS answer names.
			key := fqdn.WithTrailingDot()
			mak.Set(&c.appsByDomain, key, append(c.appsByDomain[key], app.Name))
			if selfMatchesTags {
				c.selfRoutedDomains.Add(key)
			}
		}
	}
	return true, nil
}
// client performs the conn25 functionality for clients of connectors
// It allocates magic and transit IP addresses and communicates them with
// connectors.
// It's safe for concurrent use.
type client struct {
	logf logger.Logf

	mu            sync.Mutex // protects the fields below
	magicIPPool   *ippool    // pool from which magic IPs are allocated; set on first reconfig with pools
	transitIPPool *ippool    // pool from which transit IPs are allocated; set alongside magicIPPool
	// map of magic IP -> (transit IP, app)
	magicIPs map[netip.Addr]appAddr
	config   config
}
// isConfigured reports whether a reconfig has consumed a config source yet
// (config.src is set by the first successful-enough reconfig).
func (c *client) isConfigured() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.config.src != nil
}
// reconfig updates the client's config from selfNode and, on the first
// config that carries pools, initializes the magic and transit IP pools.
func (c *client) reconfig(selfNode tailcfg.NodeView) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	updated, err := c.config.reconfig(selfNode)
	if !updated || err != nil {
		return err
	}
	c.logf("client reconfigured, domains: %v", slices.Collect(maps.Keys(c.config.appsByDomain)))
	// TODO(fran) this is not the correct way to manage the pools and changes to the pools.
	// We probably want to:
	// * check the pools haven't changed
	// * reset the whole connector if the pools change? or just if they've changed to exclude
	//   addresses we have in use?
	// * have config separate from the apps for this (rather than multiple potentially conflicting places)
	// but this works while we are just getting started here.
	for _, app := range c.config.apps {
		if c.magicIPPool != nil { // just take the first config and never reconfig
			break
		}
		if app.MagicIPPool == nil {
			// This app doesn't define pools; maybe a later one does.
			continue
		}
		mipp, err := ipSetFromIPRanges(app.MagicIPPool)
		if err != nil {
			return err
		}
		tipp, err := ipSetFromIPRanges(app.TransitIPPool)
		if err != nil {
			return err
		}
		// Both pools are set together, so a non-nil magicIPPool implies a
		// non-nil transitIPPool.
		c.magicIPPool = newIPPool(mipp)
		c.transitIPPool = newIPPool(tipp)
	}
	return nil
}
// setMagicIP records that magicAddr maps to transitAddr on behalf of app.
func (c *client) setMagicIP(magicAddr, transitAddr netip.Addr, app string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	mak.Set(&c.magicIPs, magicAddr, appAddr{app: app, addr: transitAddr})
}
// reserveAddresses allocates a magic IP and a transit IP for reaching dst,
// a resolved address of domain, via a connector. It returns the zero
// connection (no error) when domain is not connector-routed or when this
// node routes domain itself.
func (c *client) reserveAddresses(domain string, dst netip.Addr) (connection, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	appNames, ok := c.config.appsByDomain[domain]
	// Is this domain routed by connectors?
	if !ok || len(appNames) == 0 {
		return connection{}, nil
	}
	// We don't ask another connector to route (and so don't reserve addresses for)
	// domains that we ourselves route as a connector.
	if c.config.selfRoutedDomains.Contains(domain) {
		return connection{}, nil
	}
	// only reserve for first app
	app := appNames[0]
	mip, err := c.magicIPPool.next()
	if err != nil {
		return connection{}, err
	}
	tip, err := c.transitIPPool.next()
	if err != nil {
		// NOTE(review): mip was already taken from magicIPPool and is not
		// released on this path — confirm the pool reclaims it, or accept
		// the leak while this is experimental.
		return connection{}, err
	}
	connection := connection{
		dst:     dst,
		magic:   mip,
		transit: tip,
		app:     app,
	}
	c.logf("assigning magic ip for domain: %s, app: %s, %v", domain, app, mip)
	return connection, nil
}
// enqueueAddressAssignment records conn's magic->transit mapping locally.
// Despite the name, nothing is queued yet; notifying the connector is a TODO.
func (c *client) enqueueAddressAssignment(conn connection) {
	c.setMagicIP(conn.magic, conn.transit, conn.app)
	// TODO(fran) 2026-02-03 asynchronously send peerapi req to connector to
	// allocate these addresses for us.
}
// mapDNSResponse inspects the answers of a wire-format DNS response in buf
// and, for A records whose name is routed by a configured connector app,
// reserves magic/transit addresses and records their assignment. Every
// current code path returns buf unmodified.
func (c *client) mapDNSResponse(buf []byte) []byte {
	var msg dnsmessage.Message
	err := msg.Unpack(buf)
	if err != nil {
		// Not parseable as a DNS message; pass it through untouched.
		return buf
	}
	for _, a := range msg.Answers {
		// TODO(fran) AAAA?
		switch a.Header.Type {
		case dnsmessage.TypeA:
			msgARecord := (a.Body).(*dnsmessage.AResource)
			// Name is an FQDN with trailing dot, matching appsByDomain keys.
			domain := a.Header.Name.String()
			dst := netip.AddrFrom4(msgARecord.A)
			connection, err := c.reserveAddresses(domain, dst)
			if err != nil {
				// TODO(fran) log
				return buf
			}
			if !connection.isValid() {
				// NOTE(review): returning here stops processing the remaining
				// answers as soon as one un-routed domain is seen — confirm
				// this is intended rather than `continue`.
				return buf
			}
			c.enqueueAddressAssignment(connection)
		}
	}
	// TODO(fran) 2026-01-21 return a dns response with addresses
	// swapped out for the magic IPs to make conn25 work.
	return buf
}
// server performs the conn25 functionality for the connector side:
// accepting transit IP mappings from peers and routing on their behalf.
type server struct {
	logf logger.Logf

	mu sync.Mutex // protects the fields below
	// transitIPs is a map of connector client peer NodeID -> client transitIPs that we update as connector client peers instruct us to, and then use to route traffic to its destination on behalf of connector clients.
	transitIPs map[tailcfg.NodeID]map[netip.Addr]appAddr
	config     config
}
// reconfig updates the server's config from selfNode.
func (s *server) reconfig(selfNode tailcfg.NodeView) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, err := s.config.reconfig(selfNode); err != nil {
		return err
	}
	return nil
}
// connection is the set of addresses reserved for reaching one destination
// via a connector.
type connection struct {
	dst     netip.Addr // real destination IP from the DNS answer
	magic   netip.Addr // magic IP reserved for this destination (not yet substituted into DNS answers)
	transit netip.Addr // intermediate IP a connector would DNAT to dst
	app     string     // name of the connector app routing this domain
}

// isValid reports whether c refers to a real destination; the zero
// connection is invalid.
func (c connection) isValid() bool {
	return c.dst.IsValid()
}

View File

@ -0,0 +1,544 @@
// Copyright (c) Tailscale Inc & contributors
// SPDX-License-Identifier: BSD-3-Clause
package conn25
import (
"encoding/json"
"net/netip"
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"go4.org/netipx"
"golang.org/x/net/dns/dnsmessage"
"tailscale.com/tailcfg"
"tailscale.com/types/appctype"
"tailscale.com/types/logger"
"tailscale.com/util/set"
)
// mustIPSetFromPrefix returns an IPSet containing exactly the prefix s,
// panicking on error (test helper).
func mustIPSetFromPrefix(s string) *netipx.IPSet {
	var b netipx.IPSetBuilder
	b.AddPrefix(netip.MustParsePrefix(s))
	ipset, err := b.IPSet()
	if err != nil {
		panic(err)
	}
	return ipset
}
// TestHandleConnectorTransitIPRequestZeroLength tests that if sent a
// ConnectorTransitIPRequest with 0 TransitIPRequests, we respond with a
// ConnectorTransitIPResponse with 0 TransitIPResponses.
func TestHandleConnectorTransitIPRequestZeroLength(t *testing.T) {
	c := newConn25(logger.Discard)
	resp := c.handleConnectorTransitIPRequest(tailcfg.NodeID(1), ConnectorTransitIPRequest{})
	if n := len(resp.TransitIPs); n != 0 {
		t.Fatalf("n TransitIPs in response: %d, want 0", n)
	}
}
// TestHandleConnectorTransitIPRequestStoresAddr tests that if sent a
// request with a transit addr and a destination addr we store that mapping
// and can retrieve it. If sent another req with a different dst for that transit addr
// we store that instead.
func TestHandleConnectorTransitIPRequestStoresAddr(t *testing.T) {
	c := newConn25(logger.Discard)
	nid := tailcfg.NodeID(1)
	tip := netip.MustParseAddr("0.0.0.1")
	dip := netip.MustParseAddr("1.2.3.4")
	dip2 := netip.MustParseAddr("1.2.3.5")
	// mr builds a one-element request mapping transit addr t to destination d.
	mr := func(t, d netip.Addr) ConnectorTransitIPRequest {
		return ConnectorTransitIPRequest{
			TransitIPs: []TransitIPRequest{
				{TransitIP: t, DestinationIP: d},
			},
		}
	}
	resp := c.handleConnectorTransitIPRequest(nid, mr(tip, dip))
	if len(resp.TransitIPs) != 1 {
		t.Fatalf("n TransitIPs in response: %d, want 1", len(resp.TransitIPs))
	}
	got := resp.TransitIPs[0].Code
	if got != TransitIPResponseCode(0) {
		t.Fatalf("TransitIP Code: %d, want 0", got)
	}
	gotAddr := c.server.transitIPTarget(nid, tip)
	if gotAddr != dip {
		t.Fatalf("Connector stored destination for tip: %v, want %v", gotAddr, dip)
	}
	// mapping can be overwritten
	resp2 := c.handleConnectorTransitIPRequest(nid, mr(tip, dip2))
	if len(resp2.TransitIPs) != 1 {
		t.Fatalf("n TransitIPs in response: %d, want 1", len(resp2.TransitIPs))
	}
	// BUG FIX: previously inspected resp (the first response) instead of resp2,
	// so the second request's code was never actually checked.
	got2 := resp2.TransitIPs[0].Code
	if got2 != TransitIPResponseCode(0) {
		t.Fatalf("TransitIP Code: %d, want 0", got2)
	}
	gotAddr2 := c.server.transitIPTarget(nid, tip)
	if gotAddr2 != dip2 {
		// BUG FIX: previously printed the stale gotAddr instead of gotAddr2.
		t.Fatalf("Connector stored destination for tip: %v, want %v", gotAddr2, dip2)
	}
}
// TestHandleConnectorTransitIPRequestMultipleTIP tests that we can
// get a req with multiple mappings and we store them all. Including
// multiple transit addrs for the same destination.
func TestHandleConnectorTransitIPRequestMultipleTIP(t *testing.T) {
	c := newConn25(logger.Discard)
	nid := tailcfg.NodeID(1)
	tip := netip.MustParseAddr("0.0.0.1")
	tip2 := netip.MustParseAddr("0.0.0.2")
	tip3 := netip.MustParseAddr("0.0.0.3")
	dip := netip.MustParseAddr("1.2.3.4")
	dip2 := netip.MustParseAddr("1.2.3.5")
	req := ConnectorTransitIPRequest{
		TransitIPs: []TransitIPRequest{
			{TransitIP: tip, DestinationIP: dip},
			{TransitIP: tip2, DestinationIP: dip2},
			// can store same dst addr for multiple transit addrs
			{TransitIP: tip3, DestinationIP: dip},
		},
	}
	resp := c.handleConnectorTransitIPRequest(nid, req)
	if len(resp.TransitIPs) != 3 {
		t.Fatalf("n TransitIPs in response: %d, want 3", len(resp.TransitIPs))
	}
	// Every per-mapping response should report success.
	for i, tr := range resp.TransitIPs {
		if got := tr.Code; got != TransitIPResponseCode(0) {
			t.Fatalf("i=%d TransitIP Code: %d, want 0", i, got)
		}
	}
	// Every requested mapping should now be retrievable.
	for _, want := range req.TransitIPs {
		if got := c.server.transitIPTarget(nid, want.TransitIP); got != want.DestinationIP {
			t.Fatalf("Connector stored destination for tip(%v): %v, want %v", want.TransitIP, got, want.DestinationIP)
		}
	}
}
// TestHandleConnectorTransitIPRequestSameTIP tests that if we get
// a req that has more than one TransitIPRequest for the same transit addr
// only the first is stored, and the subsequent ones get an error code and
// message in the response.
func TestHandleConnectorTransitIPRequestSameTIP(t *testing.T) {
	c := newConn25(logger.Discard)
	nid := tailcfg.NodeID(1)
	tip := netip.MustParseAddr("0.0.0.1")
	tip2 := netip.MustParseAddr("0.0.0.2")
	dip := netip.MustParseAddr("1.2.3.4")
	dip2 := netip.MustParseAddr("1.2.3.5")
	dip3 := netip.MustParseAddr("1.2.3.6")
	req := ConnectorTransitIPRequest{
		TransitIPs: []TransitIPRequest{
			{TransitIP: tip, DestinationIP: dip},
			// cannot have dupe TransitIPs in one ConnectorTransitIPRequest
			{TransitIP: tip, DestinationIP: dip2},
			{TransitIP: tip2, DestinationIP: dip3},
		},
	}
	resp := c.handleConnectorTransitIPRequest(nid, req)
	if len(resp.TransitIPs) != 3 {
		t.Fatalf("n TransitIPs in response: %d, want 3", len(resp.TransitIPs))
	}
	// First occurrence of tip wins: success code, empty message.
	got := resp.TransitIPs[0].Code
	if got != TransitIPResponseCode(0) {
		t.Fatalf("i=0 TransitIP Code: %d, want 0", got)
	}
	msg := resp.TransitIPs[0].Message
	if msg != "" {
		t.Fatalf("i=0 TransitIP Message: \"%s\", want \"%s\"", msg, "")
	}
	// The duplicate is rejected with an error code and explanatory message.
	got1 := resp.TransitIPs[1].Code
	if got1 != TransitIPResponseCode(1) {
		t.Fatalf("i=1 TransitIP Code: %d, want 1", got1)
	}
	msg1 := resp.TransitIPs[1].Message
	if msg1 != dupeTransitIPMessage {
		t.Fatalf("i=1 TransitIP Message: \"%s\", want \"%s\"", msg1, dupeTransitIPMessage)
	}
	// A distinct transit address later in the same request still succeeds.
	got2 := resp.TransitIPs[2].Code
	if got2 != TransitIPResponseCode(0) {
		t.Fatalf("i=2 TransitIP Code: %d, want 0", got2)
	}
	msg2 := resp.TransitIPs[2].Message
	if msg2 != "" {
		// Fixed: this previously printed msg (the i=0 message) by mistake.
		t.Fatalf("i=2 TransitIP Message: \"%s\", want \"%s\"", msg2, "")
	}
	gotAddr1 := c.server.transitIPTarget(nid, tip)
	if gotAddr1 != dip {
		t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip, gotAddr1, dip)
	}
	gotAddr2 := c.server.transitIPTarget(nid, tip2)
	if gotAddr2 != dip3 {
		t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip2, gotAddr2, dip3)
	}
}
// TestTransitIPTargetUnknownTIP tests that unknown transit addresses can be
// looked up without problem: the lookup returns the zero netip.Addr rather
// than panicking or erroring.
func TestTransitIPTargetUnknownTIP(t *testing.T) {
	c := newConn25(logger.Discard)
	nid := tailcfg.NodeID(1)
	tip := netip.MustParseAddr("0.0.0.1")
	got := c.server.transitIPTarget(nid, tip)
	want := netip.Addr{}
	if got != want {
		t.Fatalf("Unknown transit addr, want: %v, got %v", want, got)
	}
}
// TestSetMagicIP verifies that setMagicIP records the transit address and
// app name under the given magic IP key.
func TestSetMagicIP(t *testing.T) {
	c := newConn25(logger.Discard)
	magic := netip.MustParseAddr("0.0.0.1")
	transit := netip.MustParseAddr("0.0.0.2")
	appName := "a"
	c.client.setMagicIP(magic, transit, appName)
	stored, ok := c.client.magicIPs[magic]
	if !ok {
		t.Fatal("expected there to be a value stored for the magic IP")
	}
	if stored.addr != transit {
		t.Fatalf("want %v, got %v", transit, stored.addr)
	}
	if stored.app != appName {
		t.Fatalf("want %s, got %s", appName, stored.app)
	}
}
// TestReserveIPs verifies that reserveAddresses hands out the first address
// from each configured pool, resolves the app name for the domain, and
// passes the destination address through unchanged.
func TestReserveIPs(t *testing.T) {
	c := newConn25(logger.Discard)
	c.client.magicIPPool = newIPPool(mustIPSetFromPrefix("100.64.0.0/24"))
	c.client.transitIPPool = newIPPool(mustIPSetFromPrefix("169.254.0.0/24"))
	c.client.config.appsByDomain = map[string][]string{
		"example.com.": {"a"},
	}
	dst := netip.MustParseAddr("0.0.0.1")
	con, err := c.client.reserveAddresses("example.com.", dst)
	if err != nil {
		t.Fatal(err)
	}
	// Expected values: dst passes through; the magic and transit addresses
	// are the first from their respective pools; the app name comes from
	// the example.com. mapping above.
	if want := netip.MustParseAddr("0.0.0.1"); want != con.dst {
		t.Errorf("want %v, got %v", want, con.dst)
	}
	if want := netip.MustParseAddr("100.64.0.0"); want != con.magic {
		t.Errorf("want %v, got %v", want, con.magic)
	}
	if want := netip.MustParseAddr("169.254.0.0"); want != con.transit {
		t.Errorf("want %v, got %v", want, con.transit)
	}
	if want := "a"; want != con.app {
		t.Errorf("want %s, got %s", want, con.app)
	}
}
// TestReconfig verifies that a conn25 picks up app connector configuration
// from the self node's capability map.
func TestReconfig(t *testing.T) {
	capMap := tailcfg.NodeCapMap{
		tailcfg.NodeCapability(AppConnectorsExperimentalAttrName): []tailcfg.RawMessage{
			tailcfg.RawMessage(`{"name":"app1","connectors":["tag:woo"],"domains":["example.com"]}`),
		},
	}
	self := (&tailcfg.Node{
		CapMap: capMap,
	}).View()
	c := newConn25(logger.Discard)
	if err := c.reconfig(self); err != nil {
		t.Fatal(err)
	}
	apps := c.client.config.apps
	if len(apps) != 1 || apps[0].Name != "app1" {
		t.Fatalf("want apps to have one entry 'app1', got %v", apps)
	}
}
// TestConfigReconfig exercises config.reconfig table-style: each case feeds
// capability values (either raw bytes or marshaled Conn25Attrs) plus the
// self node's tags, and checks the derived appsByDomain and
// selfRoutedDomains state.
func TestConfigReconfig(t *testing.T) {
	for _, tt := range []struct {
		name                  string
		rawCfg                string                // used verbatim when cfg is nil; lets a case feed unparseable JSON
		cfg                   []appctype.Conn25Attr // marshaled into the capability when non-nil
		tags                  []string              // tags on the self node; select which domains are self-routed
		wantErr               bool
		wantAppsByDomain      map[string][]string
		wantSelfRoutedDomains set.Set[string]
	}{
		{
			name:    "bad-config",
			rawCfg:  `bad`,
			wantErr: true,
		},
		{
			name: "simple",
			cfg: []appctype.Conn25Attr{
				{Name: "one", Domains: []string{"a.example.com"}, Connectors: []string{"tag:one"}},
				{Name: "two", Domains: []string{"b.example.com"}, Connectors: []string{"tag:two"}},
			},
			tags: []string{"tag:one"},
			// Note: domains come back fully qualified (trailing dot).
			wantAppsByDomain: map[string][]string{
				"a.example.com.": {"one"},
				"b.example.com.": {"two"},
			},
			// Only app "one" matches our tag, so only its domain is self-routed.
			wantSelfRoutedDomains: set.SetOf([]string{"a.example.com."}),
		},
		{
			name: "more-complex",
			cfg: []appctype.Conn25Attr{
				{Name: "one", Domains: []string{"1.a.example.com", "1.b.example.com"}, Connectors: []string{"tag:one", "tag:onea"}},
				{Name: "two", Domains: []string{"2.b.example.com", "2.c.example.com"}, Connectors: []string{"tag:two", "tag:twoa"}},
				{Name: "three", Domains: []string{"1.b.example.com", "1.c.example.com"}, Connectors: []string{}},
				{Name: "four", Domains: []string{"4.b.example.com", "4.d.example.com"}, Connectors: []string{"tag:four"}},
			},
			tags: []string{"tag:onea", "tag:four", "tag:unrelated"},
			wantAppsByDomain: map[string][]string{
				"1.a.example.com.": {"one"},
				// 1.b.example.com is serviced by two apps.
				"1.b.example.com.": {"one", "three"},
				"1.c.example.com.": {"three"},
				"2.b.example.com.": {"two"},
				"2.c.example.com.": {"two"},
				"4.b.example.com.": {"four"},
				"4.d.example.com.": {"four"},
			},
			// Apps "one" (via tag:onea) and "four" match our tags; "three" has
			// no connectors and "two" doesn't match, so their domains are excluded.
			wantSelfRoutedDomains: set.SetOf([]string{"1.a.example.com.", "1.b.example.com.", "4.b.example.com.", "4.d.example.com."}),
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			cfg := []tailcfg.RawMessage{tailcfg.RawMessage(tt.rawCfg)}
			if tt.cfg != nil {
				cfg = []tailcfg.RawMessage{}
				for _, attr := range tt.cfg {
					bs, err := json.Marshal(attr)
					if err != nil {
						t.Fatalf("unexpected error in test setup: %v", err)
					}
					cfg = append(cfg, tailcfg.RawMessage(bs))
				}
			}
			capMap := tailcfg.NodeCapMap{
				tailcfg.NodeCapability(AppConnectorsExperimentalAttrName): cfg,
			}
			sn := (&tailcfg.Node{
				CapMap: capMap,
				Tags:   tt.tags,
			}).View()
			c := &config{}
			_, err := c.reconfig(sn)
			if (err != nil) != tt.wantErr {
				t.Fatalf("wantErr: %t, err: %v", tt.wantErr, err)
			}
			if diff := cmp.Diff(tt.wantAppsByDomain, c.appsByDomain); diff != "" {
				t.Errorf("appsByDomain diff (-want, +got):\n%s", diff)
			}
			if diff := cmp.Diff(tt.wantSelfRoutedDomains, c.selfRoutedDomains); diff != "" {
				t.Errorf("selfRoutedDomains diff (-want, +got):\n%s", diff)
			}
		})
	}
}
// makeSelfNode builds a NodeView whose capability map carries attr as an
// app connector config, with the given tags applied to the node.
func makeSelfNode(t *testing.T, attr appctype.Conn25Attr, tags []string) tailcfg.NodeView {
	t.Helper()
	bs, err := json.Marshal(attr)
	if err != nil {
		t.Fatalf("unexpected error in test setup: %v", err)
	}
	n := &tailcfg.Node{
		CapMap: tailcfg.NodeCapMap{
			tailcfg.NodeCapability(AppConnectorsExperimentalAttrName): {tailcfg.RawMessage(bs)},
		},
		Tags: tags,
	}
	return n.View()
}
// rangeFrom returns the IP range 100.64.0.<from> through 100.64.0.<to>.
func rangeFrom(from, to string) netipx.IPRange {
	lo := netip.MustParseAddr("100.64.0." + from)
	hi := netip.MustParseAddr("100.64.0." + to)
	return netipx.IPRangeFrom(lo, hi)
}
// TestConfigReconfigUpdate verifies that config.reconfig reports
// updated=true exactly when some part of the attribute (or the node's
// tags) changed since the previous call.
func TestConfigReconfigUpdate(t *testing.T) {
	c := &config{}
	attr := appctype.Conn25Attr{
		Name:          "a",
		Domains:       []string{"a.example.com", "b.example.com"},
		Connectors:    []string{"tag:a", "tag:b"},
		MagicIPPool:   []netipx.IPRange{rangeFrom("0", "1"), rangeFrom("2", "3")},
		TransitIPPool: []netipx.IPRange{rangeFrom("3", "4"), rangeFrom("7", "70")},
	}
	tags := []string{"tag:a", "tag:b"}
	// reconfigure applies the current attr/tags and returns whether the
	// config reported a change; any error is fatal.
	reconfigure := func() bool {
		t.Helper()
		changed, err := c.reconfig(makeSelfNode(t, attr, tags))
		if err != nil {
			t.Fatal(err)
		}
		return changed
	}
	// The initial configuration counts as an update.
	if !reconfigure() {
		t.Fatal("want updated true")
	}
	// Re-applying an identical configuration does not.
	if reconfigure() {
		t.Fatal("expected reconfig with same config to return updated false")
	}
	// Each individual field change must be detected.
	for _, mutate := range []func(){
		func() { attr.Name = "b" },
		func() { attr.Domains = []string{"woo"} },
		func() { attr.Connectors = []string{"woo"} },
		func() { attr.MagicIPPool = []netipx.IPRange{rangeFrom("100", "200")} },
		func() { attr.TransitIPPool = []netipx.IPRange{rangeFrom("100", "200")} },
		func() { tags = []string{"woo"} },
	} {
		mutate()
		if !reconfigure() {
			t.Fatal("want updated true")
		}
	}
}
// TestMapDNSResponse verifies that mapDNSResponse leaves the DNS response
// bytes unmodified (for now) while reserving a magic IP for each A-record
// answer whose domain matches a configured app.
func TestMapDNSResponse(t *testing.T) {
	// makeDNSResponse builds a successful authoritative A-record response
	// for domain with one answer per entry in addrs.
	makeDNSResponse := func(domain string, addrs []dnsmessage.AResource) []byte {
		b := dnsmessage.NewBuilder(nil,
			dnsmessage.Header{
				ID:            1,
				Response:      true,
				Authoritative: true,
				RCode:         dnsmessage.RCodeSuccess,
			})
		b.EnableCompression()
		if err := b.StartQuestions(); err != nil {
			t.Fatal(err)
		}
		if err := b.Question(dnsmessage.Question{
			Name:  dnsmessage.MustNewName(domain),
			Type:  dnsmessage.TypeA,
			Class: dnsmessage.ClassINET,
		}); err != nil {
			t.Fatal(err)
		}
		if err := b.StartAnswers(); err != nil {
			t.Fatal(err)
		}
		for _, addr := range addrs {
			// Fixed: the AResource error was previously ignored, which could
			// mask a malformed test fixture.
			if err := b.AResource(
				dnsmessage.ResourceHeader{
					Name:  dnsmessage.MustNewName(domain),
					Type:  dnsmessage.TypeA,
					Class: dnsmessage.ClassINET,
				},
				addr,
			); err != nil {
				t.Fatal(err)
			}
		}
		outbs, err := b.Finish()
		if err != nil {
			t.Fatal(err)
		}
		return outbs
	}
	for _, tt := range []struct {
		name         string
		domain       string
		addrs        []dnsmessage.AResource
		wantMagicIPs map[netip.Addr]appAddr
	}{
		{
			name:   "one-ip-matches",
			domain: "example.com.",
			addrs:  []dnsmessage.AResource{{A: [4]byte{1, 0, 0, 0}}},
			// these are 'expected' because they are the beginning of the provided pools
			wantMagicIPs: map[netip.Addr]appAddr{
				netip.MustParseAddr("100.64.0.0"): {app: "app1", addr: netip.MustParseAddr("100.64.0.40")},
			},
		},
		{
			name:   "multiple-ip-matches",
			domain: "example.com.",
			addrs: []dnsmessage.AResource{
				{A: [4]byte{1, 0, 0, 0}},
				{A: [4]byte{2, 0, 0, 0}},
			},
			wantMagicIPs: map[netip.Addr]appAddr{
				netip.MustParseAddr("100.64.0.0"): {app: "app1", addr: netip.MustParseAddr("100.64.0.40")},
				netip.MustParseAddr("100.64.0.1"): {app: "app1", addr: netip.MustParseAddr("100.64.0.41")},
			},
		},
		{
			// A response for a domain no app is configured for reserves nothing.
			name:   "no-domain-match",
			domain: "x.example.com.",
			addrs: []dnsmessage.AResource{
				{A: [4]byte{1, 0, 0, 0}},
				{A: [4]byte{2, 0, 0, 0}},
			},
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			dnsResp := makeDNSResponse(tt.domain, tt.addrs)
			sn := makeSelfNode(t, appctype.Conn25Attr{
				Name:          "app1",
				Connectors:    []string{"tag:woo"},
				Domains:       []string{"example.com"},
				MagicIPPool:   []netipx.IPRange{rangeFrom("0", "10"), rangeFrom("20", "30")},
				TransitIPPool: []netipx.IPRange{rangeFrom("40", "50")},
			}, []string{})
			c := newConn25(logger.Discard)
			// Fixed: the reconfig error was previously ignored; a failed
			// reconfig would make the assertions below meaningless.
			if err := c.reconfig(sn); err != nil {
				t.Fatal(err)
			}
			bs := c.mapDNSResponse(dnsResp)
			if !reflect.DeepEqual(dnsResp, bs) {
				t.Fatal("shouldn't be changing the bytes (yet)")
			}
			if diff := cmp.Diff(tt.wantMagicIPs, c.client.magicIPs, cmpopts.EquateComparable(appAddr{}, netip.Addr{})); diff != "" {
				t.Errorf("magicIPs diff (-want, +got):\n%s", diff)
			}
		})
	}
}

View File

@ -1,7 +1,7 @@
// Copyright (c) Tailscale Inc & contributors
// SPDX-License-Identifier: BSD-3-Clause
package appc
package conn25
import (
"errors"

View File

@ -1,7 +1,7 @@
// Copyright (c) Tailscale Inc & contributors
// SPDX-License-Identifier: BSD-3-Clause
package appc
package conn25
import (
"errors"

View File

@ -46,6 +46,11 @@ var (
// be running.
const maxActiveQueries = 256
// ResponseMapper is a function that accepts the bytes representing
// a DNS response and returns bytes representing a DNS response.
// Used to observe and/or mutate DNS responses managed by this manager.
type ResponseMapper func([]byte) []byte
// We use file-ignore below instead of ignore because on some platforms,
// the lint exception is necessary and on others it is not,
// and plain ignore complains if the exception is unnecessary.
@ -67,8 +72,9 @@ type Manager struct {
knobs *controlknobs.Knobs // or nil
goos string // if empty, gets set to runtime.GOOS
mu sync.Mutex // guards following
config *Config // Tracks the last viable DNS configuration set by Set. nil on failures other than compilation failures or if set has never been called.
mu sync.Mutex // guards following
config *Config // Tracks the last viable DNS configuration set by Set. nil on failures other than compilation failures or if set has never been called.
queryResponseMapper ResponseMapper
}
// NewManager created a new manager from the given config.
@ -467,7 +473,16 @@ func (m *Manager) Query(ctx context.Context, bs []byte, family string, from neti
return nil, errFullQueue
}
defer atomic.AddInt32(&m.activeQueriesAtomic, -1)
return m.resolver.Query(ctx, bs, family, from)
outbs, err := m.resolver.Query(ctx, bs, family, from)
if err != nil {
return outbs, err
}
m.mu.Lock()
defer m.mu.Unlock()
if m.queryResponseMapper != nil {
outbs = m.queryResponseMapper(outbs)
}
return outbs, err
}
const (
@ -653,3 +668,9 @@ func CleanUp(logf logger.Logf, netMon *netmon.Monitor, bus *eventbus.Bus, health
}
var metricDNSQueryErrorQueue = clientmetric.NewCounter("dns_query_local_error_queue")
func (m *Manager) SetQueryResponseMapper(fx ResponseMapper) {
m.mu.Lock()
defer m.mu.Unlock()
m.queryResponseMapper = fx
}

View File

@ -8,6 +8,7 @@ package appctype
import (
"net/netip"
"go4.org/netipx"
"tailscale.com/tailcfg"
)
@ -93,3 +94,17 @@ type RouteUpdate struct {
Advertise []netip.Prefix
Unadvertise []netip.Prefix
}
type Conn25Attr struct {
// Name is the name of this collection of domains.
Name string `json:"name,omitempty"`
// Domains enumerates the domains serviced by the specified app connectors.
// Domains can be of the form: example.com, or *.example.com.
Domains []string `json:"domains,omitempty"`
// Connectors enumerates the app connectors which service these domains.
// These can either be "*" to match any advertising connector, or a
// tag of the form tag:<tag-name>.
Connectors []string `json:"connectors,omitempty"`
MagicIPPool []netipx.IPRange `json:"magicIPPool,omitempty"`
TransitIPPool []netipx.IPRange `json:"transitIPPool,omitempty"`
}