diff --git a/feature/conn25/conn25.go b/feature/conn25/conn25.go index 4e144ce40..38bfca1b2 100644 --- a/feature/conn25/conn25.go +++ b/feature/conn25/conn25.go @@ -19,6 +19,7 @@ import ( "slices" "strings" "sync" + "sync/atomic" "time" "go4.org/netipx" @@ -123,11 +124,12 @@ func (e *extension) Init(host ipnext.Host) error { } e.host = host - dph := newDatapathHandler(e.conn25, e.conn25.client.logf) + dph := newDatapathHandler(e.conn25, e.conn25.logf) if err := e.installHooks(dph); err != nil { return err } - e.seedPrefsConfig() + profile, prefs := e.host.Profiles().CurrentProfileState() + e.profileStateChange(profile, prefs, false) ctx, cancel := context.WithCancelCause(context.Background()) e.ctxCancel = cancel @@ -224,28 +226,42 @@ func (e *extension) installHooks(dph *datapathHandler) error { return nil } -// seedPrefsConfig provides an initial prefs config before -// any hooks fire. The OnSelfChange hook needs to fire -// for Conn25 to be fully configured and ready to use. -func (e *extension) seedPrefsConfig() { - var cfg config - cfg.prefs = configFromPrefs(e.host.Profiles().CurrentPrefs()) - e.conn25.reconfig(cfg) -} - // ClientTransitIPForMagicIP implements [IPMapper]. func (c *Conn25) ClientTransitIPForMagicIP(m netip.Addr) (netip.Addr, error) { - return c.client.transitIPForMagicIP(m) + if addr, ok := c.client.transitIPForMagicIP(m); ok { + return addr, nil + } + cfg, ok := c.getConfig() + if !ok { + return netip.Addr{}, nil + } + if !cfg.ipSets.v4Magic.Contains(m) && !cfg.ipSets.v6Magic.Contains(m) { + return netip.Addr{}, nil + } + return netip.Addr{}, ErrUnmappedMagicIP } // ConnectorRealIPForTransitIPConnection implements [IPMapper]. 
func (c *Conn25) ConnectorRealIPForTransitIPConnection(src, transit netip.Addr) (netip.Addr, error) { - return c.connector.realIPForTransitIPConnection(src, transit) + if addr, ok := c.connector.realIPForTransitIPConnection(src, transit); ok { + return addr, nil + } + cfg, ok := c.getConfig() + if !ok { + return netip.Addr{}, nil + } + if !cfg.ipSets.v4Transit.Contains(transit) && !cfg.ipSets.v6Transit.Contains(transit) { + return netip.Addr{}, nil + } + return netip.Addr{}, ErrUnmappedSrcAndTransitIP } func (e *extension) getMagicRange() views.Slice[netip.Prefix] { - cfg := e.conn25.client.getConfig() - return views.SliceOf(slices.Concat(cfg.nv.v4MagicIPSet.Prefixes(), cfg.nv.v6MagicIPSet.Prefixes())) + cfg, ok := e.conn25.getConfig() + if !ok { + return views.Slice[netip.Prefix]{} + } + return views.SliceOf(slices.Concat(cfg.ipSets.v4Magic.Prefixes(), cfg.ipSets.v6Magic.Prefixes())) } // Shutdown implements [ipnlocal.Extension]. @@ -281,29 +297,20 @@ func (e *extension) handleConnectorTransitIP(h ipnlocal.PeerAPIHandler, w http.R } // onSelfChange implements the [ipnext.Hooks.OnSelfChange] hook. -// It reads then modifies the current config. We expect that OnSelfChange -// is not called concurrently with itself or with ProfileStateChange to -// prevent TOCTOU errors. func (e *extension) onSelfChange(selfNode tailcfg.NodeView) { - cfg := e.conn25.client.getConfig() - nvCfg, err := configFromNodeView(selfNode) + cfg, err := configFromNodeView(selfNode) if err != nil { - e.conn25.client.logf("error generating config from self node view: %v", err) + e.conn25.logf("error generating config from self node view: %v", err) return } - cfg.nv = nvCfg e.conn25.reconfig(cfg) } // profileStateChange implements the [ipnext.Hooks.ProfileStateChange] hook. -// It reads then modifies the current config. We expect that ProfileStateChange -// is not called concurrently with itself or with OnSelfChange to -// prevent TOCTOU errors. 
func (e *extension) profileStateChange(loginProfile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { // TODO(mzb): Handle node changes. Wipe out all config? - cfg := e.conn25.client.getConfig() - cfg.prefs = configFromPrefs(prefs) - e.conn25.reconfig(cfg) + // We'll need to look at the ordering of this hook and onSelfChange. + e.conn25.prefsAdvertiseConnector.Store(prefs.AppConnector().Advertise) } func (e *extension) extraWireGuardAllowedIPs(k key.NodePublic) views.Slice[netip.Prefix] { @@ -317,24 +324,41 @@ type appAddr struct { // Conn25 holds state for routing traffic for a domain via a connector. type Conn25 struct { - mu sync.Mutex // mu protects reconfiguration of client and connector - client *client - connector *connector + config atomic.Pointer[config] + prefsAdvertiseConnector atomic.Bool + logf logger.Logf + client *client + connector *connector +} + +func (c *Conn25) getConfig() (*config, bool) { + cfg := c.config.Load() + return cfg, cfg.isConfigured } func (c *Conn25) isConfigured() bool { - return c.client.isConfigured() + _, ok := c.getConfig() + return ok } func newConn25(logf logger.Logf) *Conn25 { c := &Conn25{ - client: &client{ - logf: logf, - addrsCh: make(chan addrs, 64), - assignments: addrAssignments{clock: tstime.StdClock{}}, - }, + logf: logf, connector: &connector{logf: logf}, } + c.config.Store(&config{}) // initialize with empty to avoid nil checks + c.client = &client{ + logf: logf, + addrsCh: make(chan addrs, 64), + assignments: addrAssignments{clock: tstime.StdClock{}}, + getIPSets: func() ipSets { + cfg, ok := c.getConfig() + if !ok { + return emptyIPSets() + } + return cfg.ipSets + }, + } return c } @@ -346,18 +370,9 @@ func ipSetFromIPRanges(rs []netipx.IPRange) (*netipx.IPSet, error) { return b.IPSet() } -func (c *Conn25) reconfig(cfg config) { - c.mu.Lock() - defer c.mu.Unlock() - - c.client.reconfig(cfg) - c.connector.reconfig(cfg) -} - -// mapDNSResponse parses and inspects the DNS response, and uses the -// 
contents to assign addresses for connecting. -func (c *Conn25) mapDNSResponse(buf []byte) []byte { - return c.client.mapDNSResponse(buf) +func (c *Conn25) reconfig(cfg *config) { + c.config.Store(cfg) + c.client.reconfig() } const dupeTransitIPMessage = "Duplicate transit address in ConnectorTransitIPRequest" @@ -371,6 +386,21 @@ const unknownAppNameMessage = "The App name in the request does not match a conf // family of the transitIP). If a peer has stored this mapping in the connector, // Conn25 will route traffic to TransitIPs to DestinationIPs for that peer. func (c *Conn25) handleConnectorTransitIPRequest(n tailcfg.NodeView, ctipr ConnectorTransitIPRequest) ConnectorTransitIPResponse { + resp := ConnectorTransitIPResponse{} + cfg, ok := c.getConfig() + if !ok { + // TODO(mzb): If this node is no longer configured at + // the time of this call, perhaps there should be a top-level + // error, instead of error-per-TransitIP? + for range ctipr.TransitIPs { + resp.TransitIPs = append(resp.TransitIPs, TransitIPResponse{ + Code: UnknownAppName, + Message: unknownAppNameMessage, + }) + } + return resp + } + var peerIPv4, peerIPv6 netip.Addr for _, ip := range n.Addresses().All() { if !ip.IsSingleIP() || !tsaddr.IsTailscaleIP(ip.Addr()) { @@ -383,7 +413,6 @@ func (c *Conn25) handleConnectorTransitIPRequest(n tailcfg.NodeView, ctipr Conne } } - resp := ConnectorTransitIPResponse{} seen := map[netip.Addr]bool{} for _, each := range ctipr.TransitIPs { if seen[each.TransitIP] { @@ -391,10 +420,20 @@ func (c *Conn25) handleConnectorTransitIPRequest(n tailcfg.NodeView, ctipr Conne Code: DuplicateTransitIP, Message: dupeTransitIPMessage, }) - c.connector.logf("[Unexpected] peer attempt to map a transit IP reused a transitIP: node: %s, IP: %v", + c.logf("[Unexpected] peer attempt to map a transit IP reused a transitIP: node: %s, IP: %v", n.StableID(), each.TransitIP) continue } + + if _, ok := cfg.appsByName[each.App]; !ok { + resp.TransitIPs = append(resp.TransitIPs,
TransitIPResponse{ + Code: UnknownAppName, + Message: unknownAppNameMessage, + }) + c.logf("[Unexpected] peer attempt to map a transit IP referenced unknown app: node: %s, app: %q", + n.StableID(), each.App) + continue + } tipresp := c.connector.handleTransitIPRequest(n, peerIPv4, peerIPv6, each) seen[each.TransitIP] = true resp.TransitIPs = append(resp.TransitIPs, tipresp) @@ -427,12 +466,6 @@ func (c *connector) handleTransitIPRequest(n tailcfg.NodeView, peerV4 netip.Addr c.mu.Lock() defer c.mu.Unlock() - if _, ok := c.config.nv.appsByName[tipr.App]; !ok { - c.logf("[Unexpected] peer attempt to map a transit IP referenced unknown app: node: %s, app: %q", - n.StableID(), tipr.App) - return TransitIPResponse{Code: UnknownAppName, Message: unknownAppNameMessage} - } - if c.transitIPs == nil { c.transitIPs = make(map[netip.Addr]map[netip.Addr]appAddr) } @@ -515,43 +548,58 @@ type ConnectorTransitIPResponse struct { const AppConnectorsExperimentalAttrName = "tailscale.com/app-connectors-experimental" -// nodeViewConfig holds the config derived from the self node view, +// ipSets wraps all the IPSets the config needs. +type ipSets struct { + v4Transit *netipx.IPSet + v4Magic *netipx.IPSet + v6Transit *netipx.IPSet + v6Magic *netipx.IPSet +} + +func emptyIPSets() ipSets { + return ipSets{ + v4Transit: &netipx.IPSet{}, + v4Magic: &netipx.IPSet{}, + v6Transit: &netipx.IPSet{}, + v6Magic: &netipx.IPSet{}, + } +} + +// config holds the config derived from the self node view, // which includes the policy. -// nodeViewConfig is not safe for concurrent use. -type nodeViewConfig struct { +// config is not safe for concurrent use. 
+type config struct { isConfigured bool apps []appctype.Conn25Attr appsByName map[string]appctype.Conn25Attr appNamesByDomain map[dnsname.FQDN][]string selfDomains set.Set[dnsname.FQDN] - v4TransitIPSet netipx.IPSet - v4MagicIPSet netipx.IPSet - v6TransitIPSet netipx.IPSet - v6MagicIPSet netipx.IPSet + ipSets ipSets } -func configFromNodeView(n tailcfg.NodeView) (nodeViewConfig, error) { +func configFromNodeView(n tailcfg.NodeView) (*config, error) { apps, err := tailcfg.UnmarshalNodeCapViewJSON[appctype.Conn25Attr](n.CapMap(), AppConnectorsExperimentalAttrName) if err != nil { - return nodeViewConfig{}, err + return &config{}, err } if len(apps) == 0 { - return nodeViewConfig{}, nil + return &config{}, nil } selfTags := set.SetOf(n.Tags().AsSlice()) - cfg := nodeViewConfig{ + cfg := &config{ isConfigured: true, apps: apps, appsByName: map[string]appctype.Conn25Attr{}, appNamesByDomain: map[dnsname.FQDN][]string{}, selfDomains: set.Set[dnsname.FQDN]{}, + ipSets: emptyIPSets(), } for _, app := range apps { selfMatchesTags := slices.ContainsFunc(app.Connectors, selfTags.Contains) for _, d := range app.Domains { fqdn, err := normalizeDNSName(d) if err != nil { - return nodeViewConfig{}, err + return &config{}, err } mak.Set(&cfg.appNamesByDomain, fqdn, append(cfg.appNamesByDomain[fqdn], app.Name)) if selfMatchesTags { @@ -567,62 +615,39 @@ func configFromNodeView(n tailcfg.NodeView) (nodeViewConfig, error) { app := apps[0] v4Mipp, err := ipSetFromIPRanges(app.V4MagicIPPool) if err != nil { - return nodeViewConfig{}, err + return &config{}, err } v4Tipp, err := ipSetFromIPRanges(app.V4TransitIPPool) if err != nil { - return nodeViewConfig{}, err + return &config{}, err } v6Mipp, err := ipSetFromIPRanges(app.V6MagicIPPool) if err != nil { - return nodeViewConfig{}, err + return &config{}, err } v6Tipp, err := ipSetFromIPRanges(app.V6TransitIPPool) if err != nil { - return nodeViewConfig{}, err + return &config{}, err } - cfg.v4MagicIPSet = *v4Mipp - cfg.v4TransitIPSet = 
*v4Tipp - cfg.v6MagicIPSet = *v6Mipp - cfg.v6TransitIPSet = *v6Tipp + ipSets := ipSets{ + v4Magic: v4Mipp, + v4Transit: v4Tipp, + v6Magic: v6Mipp, + v6Transit: v6Tipp, + } + cfg.ipSets = ipSets } return cfg, nil } -// prefsConfig holds the config derived from the current prefs. -// prefsConfig is not safe for concurrent use. -type prefsConfig struct { - isConfigured bool - isEligibleConnector bool -} - -func configFromPrefs(prefs ipn.PrefsView) prefsConfig { - return prefsConfig{ - isConfigured: true, - isEligibleConnector: prefs.AppConnector().Advertise, - } -} - -// config wraps the config from disparate sources. -// config is not safe for concurrent use. -type config struct { - nv nodeViewConfig - prefs prefsConfig -} - -// isSelfRoutedDomain reports whether the self node is currently -// acting as a connector for the given domain. -func (c config) isSelfRoutedDomain(d dnsname.FQDN) bool { - return c.prefs.isEligibleConnector && c.nv.selfDomains.Contains(d) -} - // client performs the conn25 functionality for clients of connectors // It allocates magic and transit IP addresses and communicates them with // connectors. // It's safe for concurrent use. type client struct { - logf logger.Logf - addrsCh chan addrs + logf logger.Logf + addrsCh chan addrs + getIPSets func() ipSets mu sync.Mutex // protects the fields below v4MagicIPPool *ippool @@ -631,28 +656,18 @@ type client struct { v6TransitIPPool *ippool assignments addrAssignments byConnKey map[key.NodePublic]set.Set[netip.Prefix] - config config -} - -func (c *client) getConfig() config { - c.mu.Lock() - defer c.mu.Unlock() - return c.config } // transitIPForMagicIP is part of the implementation of the IPMapper interface for dataflows lookups. // See also [IPMapper.ClientTransitIPForMagicIP]. 
-func (c *client) transitIPForMagicIP(magicIP netip.Addr) (netip.Addr, error) { +func (c *client) transitIPForMagicIP(magicIP netip.Addr) (netip.Addr, bool) { c.mu.Lock() defer c.mu.Unlock() v, ok := c.assignments.lookupByMagicIP(magicIP) if ok { - return v.transit, nil + return v.transit, true } - if !c.config.nv.v4MagicIPSet.Contains(magicIP) && !c.config.nv.v6MagicIPSet.Contains(magicIP) { - return netip.Addr{}, nil - } - return netip.Addr{}, ErrUnmappedMagicIP + return netip.Addr{}, false } // linkLocalAllow returns true if the provided packet with a link-local Dst address has a @@ -675,40 +690,28 @@ func (c *client) isKnownTransitIP(tip netip.Addr) bool { return ok } -// isConfigured reports whether the client has received both a node view -// config (from the netmap/policy) and a prefs config. Both are required -// before the feature is active. -func (c *client) isConfigured() bool { - c.mu.Lock() - defer c.mu.Unlock() - return c.config.prefs.isConfigured && c.config.nv.isConfigured -} - -func (c *client) reconfig(newCfg config) { +func (c *client) reconfig() { c.mu.Lock() defer c.mu.Unlock() - c.config = newCfg + ipSets := c.getIPSets() - c.v4MagicIPPool = newIPPool(&(newCfg.nv.v4MagicIPSet)) - c.v4TransitIPPool = newIPPool(&(newCfg.nv.v4TransitIPSet)) - c.v6MagicIPPool = newIPPool(&(newCfg.nv.v6MagicIPSet)) - c.v6TransitIPPool = newIPPool(&(newCfg.nv.v6TransitIPSet)) + c.v4MagicIPPool = newIPPool(ipSets.v4Magic) + c.v4TransitIPPool = newIPPool(ipSets.v4Transit) + c.v6MagicIPPool = newIPPool(ipSets.v6Magic) + c.v6TransitIPPool = newIPPool(ipSets.v6Transit) } // isConnectorDomain returns true if the domain is expected // to be routed through a peer connector, but returns false // if the self node is a connector responsible for routing the // domain, and false in all other cases. 
-func (c *client) isConnectorDomain(domain dnsname.FQDN) bool { - c.mu.Lock() - defer c.mu.Unlock() - - if c.config.isSelfRoutedDomain(domain) { +func (cfg *config) isConnectorDomain(domain dnsname.FQDN, prefsAdvertiseConnector bool) bool { + if prefsAdvertiseConnector && cfg.selfDomains.Contains(domain) { return false } - appNames, ok := c.config.nv.appNamesByDomain[domain] + appNames, ok := cfg.appNamesByDomain[domain] return ok && len(appNames) > 0 } @@ -716,7 +719,7 @@ func (c *client) isConnectorDomain(domain dnsname.FQDN) bool { // for this domain+dst address, so that this client can use conn25 connectors. // It checks that this domain should be routed and that this client is not itself a connector for the domain // and generally if it is valid to make the assignment. -func (c *client) reserveAddresses(domain dnsname.FQDN, dst netip.Addr) (addrs, error) { +func (c *client) reserveAddresses(app string, domain dnsname.FQDN, dst netip.Addr) (addrs, error) { if !dst.IsValid() { return addrs{}, errors.New("dst is not valid") } @@ -725,12 +728,6 @@ func (c *client) reserveAddresses(domain dnsname.FQDN, dst netip.Addr) (addrs, e if existing, ok := c.assignments.lookupByDomainDst(domain, dst); ok { return existing, nil } - appNames, _ := c.config.nv.appNamesByDomain[domain] - if len(appNames) == 0 { - return addrs{}, fmt.Errorf("no app names found for domain %q", domain) - } - // only reserve for first app - app := appNames[0] var mip, tip netip.Addr var err error @@ -789,7 +786,7 @@ func (e *extension) sendLoop(ctx context.Context) { return case as := <-e.conn25.client.addrsCh: if err := e.handleAddressAssignment(ctx, as); err != nil { - e.conn25.client.logf("error handling transit IP assignment (app: %s, mip: %v, src: %v): %v", as.app, as.magic, as.dst, err) + e.conn25.logf("error handling transit IP assignment (app: %s, mip: %v, src: %v): %v", as.app, as.magic, as.dst, err) } } } @@ -873,9 +870,13 @@ func makePeerAPIReq(ctx context.Context, httpClient 
*http.Client, urlBase string } func (e *extension) sendAddressAssignment(ctx context.Context, as addrs) (tailcfg.NodeView, error) { - app, ok := e.conn25.client.getConfig().nv.appsByName[as.app] + cfg, ok := e.conn25.getConfig() if !ok { - e.conn25.client.logf("App not found for app: %s (domain: %s)", as.app, as.domain) + return tailcfg.NodeView{}, errors.New("not configured") + } + app, ok := cfg.appsByName[as.app] + if !ok { + e.conn25.logf("App not found for app: %s (domain: %s)", as.app, as.domain) return tailcfg.NodeView{}, errors.New("app not found") } @@ -927,7 +928,10 @@ func makeServFail(logf logger.Logf, h dnsmessage.Header, q dnsmessage.Question) return bs } -func (c *client) mapDNSResponse(buf []byte) []byte { +// mapDNSResponse parses and inspects the DNS response. If the domain +// is determined to belong to app this node is client for, it assigns addresses +// for connecting and rewrites the response to contain Magic IPs. +func (c *Conn25) mapDNSResponse(buf []byte) []byte { var p dnsmessage.Parser hdr, err := p.Start(buf) if err != nil { @@ -952,10 +956,23 @@ func (c *client) mapDNSResponse(buf []byte) []byte { if err != nil { return buf } - if !c.isConnectorDomain(queriedDomain) { + + cfg, ok := c.getConfig() + if !ok { return buf } + if !cfg.isConnectorDomain(queriedDomain, c.prefsAdvertiseConnector.Load()) { + return buf + } + + appNames, _ := cfg.appNamesByDomain[queriedDomain] + if len(appNames) == 0 { + return buf + } + // only reserve for first app + app := appNames[0] + // Now we know this is a dns response we think we should rewrite, we're going to provide our response which // currently means we will: // * write the questions through as they are @@ -964,7 +981,7 @@ func (c *client) mapDNSResponse(buf []byte) []byte { var answers []dnsResponseRewrite if question.Type != dnsmessage.TypeA && question.Type != dnsmessage.TypeAAAA { c.logf("mapping dns response for connector domain, unsupported type: %v", question.Type) - newBuf, err := 
c.rewriteDNSResponse(hdr, questions, answers) + newBuf, err := c.client.rewriteDNSResponse(app, hdr, questions, answers) if err != nil { c.logf("error writing empty response for unsupported type: %v", err) return makeServFail(c.logf, hdr, question) @@ -1049,7 +1066,7 @@ func (c *client) mapDNSResponse(buf []byte) []byte { } } } - newBuf, err := c.rewriteDNSResponse(hdr, questions, answers) + newBuf, err := c.client.rewriteDNSResponse(app, hdr, questions, answers) if err != nil { c.logf("error rewriting dns response: %v", err) return makeServFail(c.logf, hdr, question) @@ -1057,7 +1074,7 @@ func (c *client) mapDNSResponse(buf []byte) []byte { return newBuf } -func (c *client) rewriteDNSResponse(hdr dnsmessage.Header, questions []dnsmessage.Question, answers []dnsResponseRewrite) ([]byte, error) { +func (c *client) rewriteDNSResponse(app string, hdr dnsmessage.Header, questions []dnsmessage.Question, answers []dnsResponseRewrite) ([]byte, error) { b := dnsmessage.NewBuilder(nil, hdr) b.EnableCompression() if err := b.StartQuestions(); err != nil { @@ -1074,7 +1091,7 @@ func (c *client) rewriteDNSResponse(hdr dnsmessage.Header, questions []dnsmessag // make an answer for each rewrite for _, rw := range answers { - as, err := c.reserveAddresses(rw.domain, rw.dst) + as, err := c.reserveAddresses(app, rw.domain, rw.dst) if err != nil { return nil, err } @@ -1115,22 +1132,18 @@ type connector struct { // transitIPs is a map of connector client peer IP -> client transitIPs that we update as connector client peers instruct us to, and then use to route traffic to its destination on behalf of connector clients. // Note that each peer could potentially have two maps: one for its IPv4 address, and one for its IPv6 address. The transit IPs map for a given peer IP will contain transit IPs of the same family as the peer's IP. 
transitIPs map[netip.Addr]map[netip.Addr]appAddr - config config } // realIPForTransitIPConnection is part of the implementation of the IPMapper interface for dataflows lookups. // See also [IPMapper.ConnectorRealIPForTransitIPConnection]. -func (c *connector) realIPForTransitIPConnection(srcIP netip.Addr, transitIP netip.Addr) (netip.Addr, error) { +func (c *connector) realIPForTransitIPConnection(srcIP netip.Addr, transitIP netip.Addr) (netip.Addr, bool) { c.mu.Lock() defer c.mu.Unlock() v, ok := c.lookupBySrcIPAndTransitIP(srcIP, transitIP) if ok { - return v.addr, nil + return v.addr, true } - if !c.config.nv.v4TransitIPSet.Contains(transitIP) && !c.config.nv.v6TransitIPSet.Contains(transitIP) { - return netip.Addr{}, nil - } - return netip.Addr{}, ErrUnmappedSrcAndTransitIP + return netip.Addr{}, false } const packetFilterAllowReason = "app connector transit IP" @@ -1156,12 +1169,6 @@ func (c *connector) lookupBySrcIPAndTransitIP(srcIP, transitIP netip.Addr) (appA return v, ok } -func (c *connector) reconfig(newCfg config) { - c.mu.Lock() - defer c.mu.Unlock() - c.config = newCfg -} - type addrs struct { dst netip.Addr magic netip.Addr diff --git a/feature/conn25/conn25_test.go b/feature/conn25/conn25_test.go index 124b739e4..1c56e9b83 100644 --- a/feature/conn25/conn25_test.go +++ b/feature/conn25/conn25_test.go @@ -8,7 +8,6 @@ import ( "net/http" "net/http/httptest" "net/netip" - "reflect" "slices" "testing" "time" @@ -341,11 +340,10 @@ func TestHandleConnectorTransitIPRequest(t *testing.T) { // Use the same Conn25 for each request in the test and seed it with a test app name. 
c := newConn25(logger.Discard) - c.connector.config = config{ - nv: nodeViewConfig{ - appsByName: map[string]appctype.Conn25Attr{appName: {}}, - }, - } + c.reconfig(&config{ + isConfigured: true, + appsByName: map[string]appctype.Conn25Attr{appName: {}}, + }) for i, peer := range tt.ctipReqPeers { req := tt.ctipReqs[i] @@ -395,15 +393,21 @@ func TestHandleConnectorTransitIPRequest(t *testing.T) { func TestReserveIPs(t *testing.T) { c := newConn25(logger.Discard) - c.client.v4MagicIPPool = newIPPool(mustIPSetFromPrefix("100.64.0.0/24")) - c.client.v6MagicIPPool = newIPPool(mustIPSetFromPrefix("fd7a:115c:a1e0:a99c:0100::/80")) - c.client.v4TransitIPPool = newIPPool(mustIPSetFromPrefix("169.254.0.0/24")) - c.client.v6TransitIPPool = newIPPool(mustIPSetFromPrefix("fd7a:115c:a1e0:a99c:0200::/80")) app := "a" domainStr := "example.com." mbd := map[dnsname.FQDN][]string{} mbd["example.com."] = []string{app} - c.client.config.nv.appNamesByDomain = mbd + cfg := &config{ + isConfigured: true, + appNamesByDomain: mbd, + ipSets: ipSets{ + v4Magic: mustIPSetFromPrefix("100.64.0.0/24"), + v6Magic: mustIPSetFromPrefix("fd7a:115c:a1e0:a99c:0100::/80"), + v4Transit: mustIPSetFromPrefix("169.254.0.0/24"), + v6Transit: mustIPSetFromPrefix("fd7a:115c:a1e0:a99c:0200::/80"), + }, + } + c.reconfig(cfg) domain := must.Get(dnsname.ToFQDN(domainStr)) for _, tt := range []struct { @@ -426,7 +430,7 @@ func TestReserveIPs(t *testing.T) { }, } { t.Run(tt.name, func(t *testing.T) { - addrs, err := c.client.reserveAddresses(domain, tt.dst) + addrs, err := c.client.reserveAddresses(app, domain, tt.dst) if err != nil { t.Fatal(err) } @@ -459,74 +463,26 @@ func TestReconfig(t *testing.T) { c := newConn25(logger.Discard) if c.isConfigured() { - t.Fatal("expected Conn25 to report unconfigured before reconfig") + t.Fatal("expected Conn25 isConfigured() to report unconfigured before reconfig") } sn := (&tailcfg.Node{ CapMap: capMap, }).View() - cfg := mustConfig(t, sn, testPrefsIsConnector) + cfg := 
mustConfig(t, sn) c.reconfig(cfg) if !c.isConfigured() { - t.Fatal("expected Conn25 to report configured after reconfig") + t.Fatal("expected Conn25 isConfigured() to report configured after reconfig") } - if !reflect.DeepEqual(c.client.config, c.connector.config) { - t.Fatal("client and connector have different configs") + cfg, ok := c.getConfig() + if !ok { + t.Fatal("expected Conn25 getConfig() to report configured after reconfig") } - if len(c.client.config.nv.apps) != 1 || c.client.config.nv.apps[0].Name != "app1" { - t.Fatalf("want apps to have one entry 'app1', got %v", c.client.config.nv.apps) - } - - if c.client.config.prefs.isEligibleConnector != true { - t.Fatal("want prefs config to have isEligibleConnector=true") - } - - c.reconfig(config{ - prefs: prefsConfig{isConfigured: true}, - }) - if c.isConfigured() { - t.Fatal("expected Conn25 to report unconfigured with only prefs config") - } - - c.reconfig(config{ - nv: nodeViewConfig{isConfigured: true}, - }) - if c.isConfigured() { - t.Fatal("expected Conn25 to report unconfigured with only nodeview config") - } -} - -func TestConfigFromPrefs(t *testing.T) { - for _, tt := range []struct { - name string - prefs ipn.PrefsView - expected prefsConfig - }{ - { - name: "is-eligible-connector", - prefs: testPrefsIsConnector, - expected: prefsConfig{ - isConfigured: true, - isEligibleConnector: true, - }, - }, - { - name: "not-eligible-connector", - prefs: testPrefsNotConnector, - expected: prefsConfig{ - isConfigured: true, - isEligibleConnector: false, - }, - }, - } { - t.Run(tt.name, func(t *testing.T) { - if diff := cmp.Diff(tt.expected, configFromPrefs(tt.prefs), cmp.AllowUnexported(prefsConfig{})); diff != "" { - t.Errorf("unexpected prefsConfig (-want,+got): %s", diff) - } - }) + if len(cfg.apps) != 1 || cfg.apps[0].Name != "app1" { + t.Fatalf("want apps to have one entry 'app1', got %v", cfg.apps) } } @@ -646,23 +602,16 @@ func makeSelfNode(t *testing.T, attrs []appctype.Conn25Attr, tags []string) tail } 
var ( - testPrefsIsConnector = (&ipn.Prefs{AppConnector: ipn.AppConnectorPrefs{Advertise: true}}).View() testPrefsNotConnector = (&ipn.Prefs{AppConnector: ipn.AppConnectorPrefs{Advertise: false}}).View() ) -func mustConfig(t *testing.T, selfNode tailcfg.NodeView, prefs ipn.PrefsView) config { +func mustConfig(t *testing.T, selfNode tailcfg.NodeView) *config { t.Helper() - nvCfg, err := configFromNodeView(selfNode) + cfg, err := configFromNodeView(selfNode) if err != nil { t.Fatal(err) } - - prefsCfg := configFromPrefs(prefs) - - return config{ - nv: nvCfg, - prefs: prefsCfg, - } + return cfg } func v4RangeFrom(from, to string) netipx.IPRange { @@ -941,14 +890,11 @@ func TestMapDNSResponseAssignsAddrs(t *testing.T) { V4TransitIPPool: []netipx.IPRange{v4RangeFrom("40", "50")}, V6TransitIPPool: []netipx.IPRange{v6RangeFrom("40", "50")}, }}, tt.selfTags) - prefs := testPrefsNotConnector - if tt.isEligibleConnector { - prefs = testPrefsIsConnector - } c := newConn25(logger.Discard) - cfg := mustConfig(t, sn, prefs) + cfg := mustConfig(t, sn) c.reconfig(cfg) + c.prefsAdvertiseConnector.Store(tt.isEligibleConnector) c.mapDNSResponse(dnsResp) if diff := cmp.Diff( @@ -979,19 +925,19 @@ func TestReserveAddressesDeduplicated(t *testing.T) { }, } { t.Run(tt.name, func(t *testing.T) { - c := newConn25(logger.Discard) - c.client.v4MagicIPPool = newIPPool(mustIPSetFromPrefix("100.64.0.0/24")) - c.client.v6MagicIPPool = newIPPool(mustIPSetFromPrefix("fd7a:115c:a1e0:a99c:0100::/80")) - c.client.v4TransitIPPool = newIPPool(mustIPSetFromPrefix("169.254.0.0/24")) - c.client.v6TransitIPPool = newIPPool(mustIPSetFromPrefix("fd7a:115c:a1e0:a99c:0200::/80")) - c.client.config.nv.appNamesByDomain = map[dnsname.FQDN][]string{"example.com.": {"a"}} + conn25 := newConn25(t.Logf) + c := conn25.client + c.v4MagicIPPool = newIPPool(mustIPSetFromPrefix("100.64.0.0/24")) + c.v6MagicIPPool = newIPPool(mustIPSetFromPrefix("fd7a:115c:a1e0:a99c:0100::/80")) + c.v4TransitIPPool = 
newIPPool(mustIPSetFromPrefix("169.254.0.0/24")) + c.v6TransitIPPool = newIPPool(mustIPSetFromPrefix("fd7a:115c:a1e0:a99c:0200::/80")) - first, err := c.client.reserveAddresses("example.com.", tt.dst) + first, err := c.reserveAddresses("a", "example.com.", tt.dst) if err != nil { t.Fatal(err) } - second, err := c.client.reserveAddresses("example.com.", tt.dst) + second, err := c.reserveAddresses("a", "example.com.", tt.dst) if err != nil { t.Fatal(err) } @@ -999,10 +945,10 @@ func TestReserveAddressesDeduplicated(t *testing.T) { if first.magic != second.magic { t.Errorf("expected same magic addrs on repeated call, got first=%v second=%v", first.magic, second.magic) } - if got := len(c.client.assignments.byMagicIP); got != 1 { + if got := len(c.assignments.byMagicIP); got != 1 { t.Errorf("want 1 entry in byMagicIP, got %d", got) } - if got := len(c.client.assignments.byDomainDst); got != 1 { + if got := len(c.assignments.byDomainDst); got != 1 { t.Errorf("want 1 entry in byDomainDst, got %d", got) } @@ -1039,6 +985,9 @@ type testProfileServices struct { } func (p *testProfileServices) CurrentPrefs() ipn.PrefsView { return p.prefs } +func (p *testProfileServices) CurrentProfileState() (ipn.LoginProfileView, ipn.PrefsView) { + return ipn.LoginProfileView{}, p.prefs +} type testHost struct { ipnext.Host @@ -1127,7 +1076,7 @@ func TestAddressAssignmentIsHandled(t *testing.T) { Domains: []string{"example.com"}, }}, []string{}) - cfg := mustConfig(t, sn, testPrefsNotConnector) + cfg := mustConfig(t, sn) ext.conn25.reconfig(cfg) as := addrs{ @@ -1208,7 +1157,7 @@ func TestMapDNSResponseRewritesResponses(t *testing.T) { V6TransitIPPool: []netipx.IPRange{netipx.IPRangeFrom(netip.MustParseAddr("2606:4700::6813:100"), netip.MustParseAddr("2606:4700::6813:1ff"))}, }}, []string{}) - cfg := mustConfig(t, sn, testPrefsNotConnector) + cfg := mustConfig(t, sn) compareToRecords := func(t *testing.T, resources []dnsmessage.Resource, want []netip.Addr) { t.Helper() @@ -1560,7 +1509,7 
@@ func TestHandleAddressAssignmentStoresTransitIPs(t *testing.T) { }, }, []string{}) - cfg := mustConfig(t, sn, testPrefsNotConnector) + cfg := mustConfig(t, sn) ext.conn25.reconfig(cfg) type lookup struct { @@ -1738,7 +1687,7 @@ func TestClientTransitIPForMagicIP(t *testing.T) { V4MagicIPPool: []netipx.IPRange{v4RangeFrom("0", "10")}, // 100.64.0.0 - 100.64.0.10 V6MagicIPPool: []netipx.IPRange{v6RangeFrom("0", "10")}, }}, []string{}) - cfg := mustConfig(t, sn, testPrefsNotConnector) + cfg := mustConfig(t, sn) mappedMip := netip.MustParseAddr("100.64.0.0") mappedTip := netip.MustParseAddr("169.0.0.0") @@ -1813,7 +1762,7 @@ func TestClientTransitIPForMagicIP(t *testing.T) { }); err != nil { t.Fatal(err) } - tip, err := c.client.transitIPForMagicIP(tt.mip) + tip, err := c.ClientTransitIPForMagicIP(tt.mip) if tip != tt.wantTip { t.Fatalf("checking transit ip: want %v, got %v", tt.wantTip, tip) } @@ -1828,7 +1777,7 @@ func TestConnectorRealIPForTransitIPConnection(t *testing.T) { sn := makeSelfNode(t, []appctype.Conn25Attr{{ V4TransitIPPool: []netipx.IPRange{v4RangeFrom("40", "50")}, // 100.64.0.40 - 100.64.0.50 }}, []string{}) - cfg := mustConfig(t, sn, testPrefsIsConnector) + cfg := mustConfig(t, sn) mappedSrc := netip.MustParseAddr("100.0.0.1") unmappedSrc := netip.MustParseAddr("100.0.0.2") @@ -1885,7 +1834,7 @@ func TestConnectorRealIPForTransitIPConnection(t *testing.T) { c.connector.transitIPs = map[netip.Addr]map[netip.Addr]appAddr{} c.connector.transitIPs[mappedSrc] = map[netip.Addr]appAddr{} c.connector.transitIPs[mappedSrc][mappedTip] = appAddr{addr: mappedMip} - mip, err := c.connector.realIPForTransitIPConnection(tt.src, tt.tip) + mip, err := c.ConnectorRealIPForTransitIPConnection(tt.src, tt.tip) if mip != tt.wantMip { t.Fatalf("checking magic ip: want %v, got %v", tt.wantMip, mip) } @@ -1974,7 +1923,7 @@ func TestGetMagicRange(t *testing.T) { V4MagicIPPool: []netipx.IPRange{netipx.IPRangeFrom(netip.MustParseAddr("0.0.0.1"), 
netip.MustParseAddr("0.0.0.3"))}, V6MagicIPPool: []netipx.IPRange{netipx.IPRangeFrom(netip.MustParseAddr("::1"), netip.MustParseAddr("::3"))}, }}, []string{}) - cfg := mustConfig(t, sn, testPrefsNotConnector) + cfg := mustConfig(t, sn) c := newConn25(t.Logf) c.reconfig(cfg) ext := &extension{