Brad Fitzpatrick b2c9e9bd71 ipn/ipnlocal, control/controlclient: process node adds/removes in constant time
For large tailnets (~50k+ nodes) with frequent peer churn (ephemeral
GitHub Actions workers etc.), tailscaled used to rebuild the full
netmap and fan it out on the IPN bus on every MapResponse that
added or removed a peer. Two compounding O(N) costs per delta: the
full netmap rebuild + every Notify.NetMap encode to every bus watcher.

This change tackles both:

  1. Plumb O(1) peer add/remove through the delta path. PeersChanged
     and PeersRemoved no longer veto the delta path; instead they
     mutate the per-node-backend peer map in place.

  2. Restrict ipn.Notify.NetMap emission to the platforms whose host
     GUIs still depend on it (Windows, macOS, iOS) and migrate
     in-tree consumers off it everywhere else:

     - Migrate reactive consumers (containerboot, kube agents,
       sniproxy, tsconsensus, etc.) off Notify.NetMap to the
       previously-added Notify.SelfChange signal so they no longer
       have to subscribe to the full netmap.
     - Add ipn.NotifyNoNetMap so GUI clients on legacy-emit platforms
       that have already migrated can opt out of the per-watcher
       NetMap encode (see the sketch after this list).
     - Gate Notify.NetMap emission on the producer side by a compile-
       time GOOS check, so the supporting code is dead-code-eliminated
       on Linux and other geese where no GUI consumer needs it.
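
As a rough sketch (not part of this change's diff), a consumer that has
migrated off Notify.NetMap might watch the IPN bus like this, assuming
the existing LocalClient.WatchIPNBus / ipn.NotifyInitialState LocalAPI
surface, the ipn.NotifyNoNetMap bit added here, and a hypothetical
handleNotify helper standing in for the consumer's own logic:

    watcher, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialState|ipn.NotifyNoNetMap)
    if err != nil {
        return err
    }
    defer watcher.Close()
    for {
        n, err := watcher.Next()
        if err != nil {
            return err
        }
        // Notify.NetMap is never populated for this watcher; react to
        // the other Notify fields (e.g. the SelfChange signal) instead.
        handleNotify(n)
    }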

Re-running BenchmarkGiantTailnet from tstest/largetailnet, which was
added along with baseline numbers on unmodified main in ad5436af0d57,
the per-delta cost (one peer add+remove pair) is now ~O(1) regardless
of tailnet size N:

    N         no-watcher (ms/op)            bus-watcher (ms/op)
              before    now     factor      before    now     factor
     10000        32   0.11       300x         166   0.13      1300x
     50000       222   0.11      2000x         865   0.13      6700x
    100000       504   0.12      4100x        1765   0.13     13400x
    250000      1551   0.12     12500x        4696   0.15     32400x

Updates #12542

Change-Id: I94e34b37331d1a8ec74c299deffadf4d061fda9e
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2026-05-02 00:36:38 +00:00


// Copyright (c) Tailscale Inc & contributors
// SPDX-License-Identifier: BSD-3-Clause
package controlclient
import (
"cmp"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"io"
"maps"
"net"
"net/netip"
"reflect"
"runtime"
"runtime/debug"
"slices"
"strconv"
"sync"
"time"
"tailscale.com/control/controlknobs"
"tailscale.com/envknob"
"tailscale.com/hostinfo"
"tailscale.com/tailcfg"
"tailscale.com/tstime"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/netmap"
"tailscale.com/types/views"
"tailscale.com/util/clientmetric"
"tailscale.com/util/mak"
"tailscale.com/util/set"
"tailscale.com/util/slicesx"
"tailscale.com/wgengine/filter"
)
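// responseWithSource pairs a tailcfg.MapResponse with whether it was
// synthesized locally from a TSMP-learned disco key (viaTSMP) rather than
// received from the control plane.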
type responseWithSource struct {
response *tailcfg.MapResponse
viaTSMP bool
}
// mapSession holds the state over a long-polled "map" request to the
// control plane.
//
// It accepts incremental tailcfg.MapResponse values via
// HandleNonKeepAliveMapResponse and produces fully inflated NetworkMaps
// for the NetmapUpdater, filling in data omitted because it was implicit
// in prior MapResponse values from within the same session (the same
// long-poll HTTP response to the one MapRequest).
type mapSession struct {
// Immutable fields.
netmapUpdater NetmapUpdater // called on changes (in addition to the optional hooks below)
controlKnobs *controlknobs.Knobs // or nil
privateNodeKey key.NodePrivate
publicNodeKey key.NodePublic
logf logger.Logf
vlogf logger.Logf
machinePubKey key.MachinePublic
altClock tstime.Clock // if nil, regular time is used
cancel context.CancelFunc // always non-nil, shuts down caller's base long poll context
keepAliveZ []byte // if non-nil, the learned zstd encoding of the just-KeepAlive message for this session
ztdDecodesForTest int // for testing
// sessionAliveCtx is a Background-based context that's alive for the
// duration of the mapSession that we own the lifetime of. It's closed by
// sessionAliveCtxClose.
sessionAliveCtx context.Context
sessionAliveCtxClose context.CancelFunc // closes sessionAliveCtx
// Optional hooks, guaranteed non-nil (set to no-op funcs) by the
// newMapSession constructor. They must be overridden if desired
// before the mapSession is used.
// onDebug specifies what to do with a *tailcfg.Debug message.
onDebug func(context.Context, *tailcfg.Debug) error
// onSelfNodeChanged is called before the NetmapUpdater if the self node was
// changed.
onSelfNodeChanged func(*netmap.NetworkMap)
// Fields storing state over the course of multiple MapResponses.
lastPrintMap time.Time
lastNode tailcfg.NodeView
lastCapSet set.Set[tailcfg.NodeCapability]
lastDNSConfig *tailcfg.DNSConfig
lastDERPMap *tailcfg.DERPMap
lastUserProfile map[tailcfg.UserID]tailcfg.UserProfileView
lastPacketFilterRules views.Slice[tailcfg.FilterRule] // concatenation of all namedPacketFilters
namedPacketFilters map[string]views.Slice[tailcfg.FilterRule]
lastParsedPacketFilter []filter.Match
lastSSHPolicy *tailcfg.SSHPolicy
collectServices bool
lastDomain string
lastDomainAuditLogID string
lastHealth []string
lastDisplayMessages map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage
lastPopBrowserURL string
lastTKAInfo *tailcfg.TKAInfo
lastNetmapSummary string // from NetworkMap.VeryConcise
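// cqmu guards changeQueue and changeQueueClosed. Incoming MapResponses are
// serialized through changeQueue and handled one at a time by the run
// goroutine; processQueue lets Close wait for that goroutine to finish.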
cqmu sync.Mutex
changeQueue chan responseWithSource
changeQueueClosed bool
processQueue sync.WaitGroup
// peersMu protects the peers map.
peersMu sync.RWMutex
peers map[tailcfg.NodeID]tailcfg.NodeView
}
// newMapSession returns a mostly unconfigured new mapSession.
//
// Modify its optional fields on the returned value before use.
//
// It must have its Close method called to release resources.
func newMapSession(privateNodeKey key.NodePrivate, nu NetmapUpdater, controlKnobs *controlknobs.Knobs) *mapSession {
ms := &mapSession{
netmapUpdater: nu,
controlKnobs: controlKnobs,
privateNodeKey: privateNodeKey,
publicNodeKey: privateNodeKey.Public(),
lastDNSConfig: new(tailcfg.DNSConfig),
lastUserProfile: map[tailcfg.UserID]tailcfg.UserProfileView{},
// Non-nil no-op defaults, to be optionally overridden by the caller.
logf: logger.Discard,
vlogf: logger.Discard,
cancel: func() {},
onDebug: func(context.Context, *tailcfg.Debug) error { return nil },
onSelfNodeChanged: func(*netmap.NetworkMap) {},
changeQueue: make(chan responseWithSource),
changeQueueClosed: false,
}
ms.sessionAliveCtx, ms.sessionAliveCtxClose = context.WithCancel(context.Background())
ms.processQueue.Add(1)
go ms.run()
return ms
}
// run processes the mapSession's queue of tailcfg.MapResponse values one
// by one until Close is called on the mapSession. When the mapSession is
// closed, the queue is locked and any remaining items are drained before
// run returns.
func (ms *mapSession) run() {
defer ms.processQueue.Done()
for {
select {
case change := <-ms.changeQueue:
ms.handleNonKeepAliveMapResponse(ms.sessionAliveCtx, change.response, change.viaTSMP)
case <-ms.sessionAliveCtx.Done():
// Drain any remaining items in the queue before exiting.
// Lock the queue while doing so, so that updates arriving through other
// entry points aren't lost or sent on a closed channel. This is
// especially relevant for calls to updateDiscoForNode.
ms.cqmu.Lock()
ms.changeQueueClosed = true
ms.cqmu.Unlock()
for {
select {
case change := <-ms.changeQueue:
ms.handleNonKeepAliveMapResponse(ms.sessionAliveCtx, change.response, change.viaTSMP)
default:
// Queue is empty, close it and exit
close(ms.changeQueue)
return
}
}
}
}
}
// occasionallyPrintSummary logs the summary at most once every 5 minutes.
// The summary is the NetworkMap.VeryConcise result from the last received
// map response.
func (ms *mapSession) occasionallyPrintSummary(summary string) {
// Occasionally print the netmap header.
// This is handy for debugging, and our logs processing
// pipeline depends on it. (TODO: Remove this dependency.)
now := ms.clock().Now()
if now.Sub(ms.lastPrintMap) < 5*time.Minute {
return
}
ms.lastPrintMap = now
ms.logf("[v1] new network map (periodic):\n%s", summary)
}
func (ms *mapSession) clock() tstime.Clock {
return cmp.Or[tstime.Clock](ms.altClock, tstime.StdClock{})
}
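// Close shuts down the mapSession's change-processing goroutine and waits
// for any queued MapResponses to be drained.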
func (ms *mapSession) Close() {
ms.sessionAliveCtxClose()
ms.processQueue.Wait()
}
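// ErrChangeQueueClosed is returned when a MapResponse is submitted after the
// mapSession has been closed and its change queue no longer accepts work.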
var ErrChangeQueueClosed = errors.New("change queue closed")
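// updateDiscoForNode enqueues a synthetic PeersChangedPatch update recording
// a disco key (plus LastSeen and Online state) learned for a node via TSMP.
// It returns ErrChangeQueueClosed if the mapSession has already been closed.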
func (ms *mapSession) updateDiscoForNode(id tailcfg.NodeID, key key.NodePublic, discoKey key.DiscoPublic, lastSeen time.Time, online bool) error {
ms.cqmu.Lock()
if ms.changeQueueClosed {
ms.cqmu.Unlock()
ms.processQueue.Wait()
return ErrChangeQueueClosed
}
resp := responseWithSource{
response: &tailcfg.MapResponse{
PeersChangedPatch: []*tailcfg.PeerChange{{
NodeID: id,
Key: &key,
LastSeen: &lastSeen,
Online: &online,
DiscoKey: &discoKey,
}},
},
viaTSMP: true,
}
ms.changeQueue <- resp
ms.cqmu.Unlock()
return nil
}
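// HandleNonKeepAliveMapResponse enqueues resp to be processed by the
// mapSession's run loop. It returns ErrChangeQueueClosed if the mapSession
// has already been closed.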
func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *tailcfg.MapResponse) error {
ms.cqmu.Lock()
if ms.changeQueueClosed {
ms.cqmu.Unlock()
ms.processQueue.Wait()
return ErrChangeQueueClosed
}
change := responseWithSource{
response: resp,
viaTSMP: false,
}
ms.changeQueue <- change
ms.cqmu.Unlock()
return nil
}
// handleNonKeepAliveMapResponse handles a non-KeepAlive MapResponse (full or
// incremental).
//
// All fields that are valid on a KeepAlive MapResponse have already been
// handled.
//
// TODO(bradfitz): make this handle all fields later. For now (2023-08-20) this
// is [re]factoring progress enough.
func (ms *mapSession) handleNonKeepAliveMapResponse(ctx context.Context, resp *tailcfg.MapResponse, viaTSMP bool) error {
if debug := resp.Debug; debug != nil {
if err := ms.onDebug(ctx, debug); err != nil {
return err
}
}
if DevKnob.StripEndpoints() {
for _, p := range resp.Peers {
p.Endpoints = nil
}
for _, p := range resp.PeersChanged {
p.Endpoints = nil
}
}
// For responses that mutate the self node, check for updated nodeAttrs.
if resp.Node != nil {
upgradeNode(resp.Node)
if DevKnob.StripCaps() {
resp.Node.Capabilities = nil
resp.Node.CapMap = nil
}
// If the server is old and is still sending us Capabilities instead of
// CapMap, convert it to CapMap early so the rest of the client code can
// work only in terms of CapMap.
for _, c := range resp.Node.Capabilities {
if _, ok := resp.Node.CapMap[c]; !ok {
mak.Set(&resp.Node.CapMap, c, nil)
}
}
ms.controlKnobs.UpdateFromNodeAttributes(resp.Node.CapMap)
}
for _, p := range resp.Peers {
upgradeNode(p)
}
for _, p := range resp.PeersChanged {
upgradeNode(p)
}
// Call Node.InitDisplayNames on any changed nodes.
initDisplayNames(cmp.Or(resp.Node.View(), ms.lastNode), resp)
ms.patchifyPeersChanged(resp)
ms.removeUnwantedDiscoUpdates(resp, viaTSMP)
// The TSMP-learned key was rejected; no more work is needed in the engine.
if viaTSMP && len(resp.PeersChangedPatch) == 0 {
return nil
}
ms.removeUnwantedDiscoUpdatesFromFullNetmapUpdate(resp)
ms.updateStateFromResponse(resp)
// If the source was learned via TSMP, the updated disco key needs to be
// marked in userspaceEngine as an update that should not reconfigure the
// WireGuard connection.
if viaTSMP {
ms.tryMarkDiscoAsLearnedFromTSMP(resp)
}
if ms.tryHandleIncrementally(resp) {
ms.occasionallyPrintSummary(ms.lastNetmapSummary)
return nil
}
// We have to rebuild the whole netmap (lots of garbage & work downstream of
// our UpdateFullNetmap call). This is the part we tried to avoid but
// some field mutations (especially rare ones) aren't yet handled.
if runtime.GOOS == "ios" {
// Memory is tight on iOS. Free what we can while we
// can before this potential burst of in-use memory.
debug.FreeOSMemory()
}
nm := ms.netmap()
ms.lastNetmapSummary = nm.VeryConcise()
ms.occasionallyPrintSummary(ms.lastNetmapSummary)
// If the self node changed, we might need to update persist.
if resp.Node != nil {
ms.onSelfNodeChanged(nm)
}
ms.netmapUpdater.UpdateFullNetmap(nm)
return nil
}
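// tryMarkDiscoAsLearnedFromTSMP tells the netmap updater, if it implements
// patchDiscoKeyer, about disco keys in res that were learned via TSMP, so
// the engine can patch the key without a full WireGuard reconfiguration.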
func (ms *mapSession) tryMarkDiscoAsLearnedFromTSMP(res *tailcfg.MapResponse) {
dun, ok := ms.netmapUpdater.(patchDiscoKeyer)
if !ok {
return
}
// In reality we should never really have more than one change here over TSMP.
for _, change := range res.PeersChangedPatch {
if change == nil || change.DiscoKey == nil || change.Key == nil {
continue
}
dun.PatchDiscoKey(*change.Key, *change.DiscoKey)
}
}
// upgradeNode upgrades Node fields from the server into their modern
// forms, moving data out of deprecated fields.
func upgradeNode(n *tailcfg.Node) {
if n == nil {
return
}
if n.LegacyDERPString != "" {
if n.HomeDERP == 0 {
ip, portStr, err := net.SplitHostPort(n.LegacyDERPString)
if ip == tailcfg.DerpMagicIP && err == nil {
port, err := strconv.Atoi(portStr)
if err == nil {
n.HomeDERP = port
}
}
}
n.LegacyDERPString = ""
}
if DevKnob.StripHomeDERP() {
n.HomeDERP = 0
}
if n.AllowedIPs == nil {
n.AllowedIPs = slices.Clone(n.Addresses)
}
}
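// tryHandleIncrementally attempts to apply res as a delta (packet filter,
// user profiles, and peer mutations) without rebuilding the full netmap.
// It reports whether the delta path fully handled the response; if it
// returns false, the caller falls back to a full netmap rebuild.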
func (ms *mapSession) tryHandleIncrementally(res *tailcfg.MapResponse) bool {
if ms.controlKnobs != nil && ms.controlKnobs.DisableDeltaUpdates.Load() {
return false
}
nud, ok := ms.netmapUpdater.(NetmapDeltaUpdater)
if !ok {
return false
}
// If the response carries a new packet filter, the updater must
// support pushing it narrowly; otherwise fall back to a full netmap
// rebuild. PacketFilter/PacketFilters are no longer in
// mapResponseContainsNonPatchFields, so MutationsFromMapResponse will
// happily return mutations alongside a filter change — we need to
// deliver the filter separately before those mutations land.
if res.PacketFilter != nil || res.PacketFilters != nil {
pfu, ok := ms.netmapUpdater.(PacketFilterUpdater)
if !ok {
return false
}
pfu.UpdatePacketFilter(ms.lastPacketFilterRules, ms.lastParsedPacketFilter)
}
// Same shape for UserProfiles: deliver any new/updated profiles before
// the peer mutations that may reference them, so bus consumers never
// see a UserID for which a profile hasn't been published. The values
// are read from ms.lastUserProfile (just populated by
// updateStateFromResponse) so views are shared with mapSession's
// store; downstream consumers can use [UserProfileView.Equal] for
// dedup without copying.
if len(res.UserProfiles) > 0 {
upu, ok := ms.netmapUpdater.(UserProfileUpdater)
if !ok {
return false
}
profiles := make(map[tailcfg.UserID]tailcfg.UserProfileView, len(res.UserProfiles))
for _, up := range res.UserProfiles {
profiles[up.ID] = ms.lastUserProfile[up.ID]
}
upu.UpdateUserProfiles(profiles)
}
mutations, ok := netmap.MutationsFromMapResponse(res, time.Now())
if !ok {
return false
}
if len(mutations) > 0 {
return nud.UpdateNetmapDelta(mutations)
}
return true
}
// updateStats are some stats from updateStateFromResponse, primarily for
// testing. It's meant to be cheap enough to always compute, though. It doesn't
// allocate.
type updateStats struct {
allNew bool
added int
removed int
changed int
}
// removeUnwantedDiscoUpdates goes over the patchified updates and rejects
// items where the node is offline and whose LastSeen has not moved past
// the value already recorded for that peer.
func (ms *mapSession) removeUnwantedDiscoUpdates(resp *tailcfg.MapResponse, viaTSMP bool) {
ms.peersMu.RLock()
defer ms.peersMu.RUnlock()
acceptedDiscoUpdates := resp.PeersChangedPatch[:0]
for _, change := range resp.PeersChangedPatch {
// Accept if:
// - DiscoKey is nil (i.e. it did not change), OR
// - any of the fields we rely on for rejection is missing.
if change.DiscoKey == nil || change.Online == nil || change.LastSeen == nil {
acceptedDiscoUpdates = append(acceptedDiscoUpdates, change)
continue
}
existingNode, ok := ms.peers[change.NodeID]
// Accept if:
// - Cannot find the peer, don't have enough data.
if !ok {
acceptedDiscoUpdates = append(acceptedDiscoUpdates, change)
continue
}
// Reject if:
// - key was learned via tsmp AND,
// - existing node is online AND,
// - key did not change.
// Here to avoid a deeper reconfig in the case where we get a TSMP key
// exchange while that node is already in a connected state (from the view
// of the control plane). This is meant to keep the node stable, avoiding a
// reconfiguration of the node deeper down in the engine.
// With this, we are avoiding updating the LastSeen and Online fields from
// TSMP updates when that is not relevant, overall making the connection
// state change less, and updating the engine less.
if viaTSMP && existingNode.Online().Get() &&
*change.DiscoKey == existingNode.DiscoKey() {
continue
}
// Accept if:
// - Node is online.
if *change.Online {
acceptedDiscoUpdates = append(acceptedDiscoUpdates, change)
continue
}
// Accept if:
// - we don't have a LastSeen to compare against on the existing node,
// - OR lastSeen moved forward in time.
if existingLastSeen, ok := existingNode.LastSeen().GetOk(); !ok ||
change.LastSeen.After(existingLastSeen) {
acceptedDiscoUpdates = append(acceptedDiscoUpdates, change)
}
}
resp.PeersChangedPatch = acceptedDiscoUpdates
}
// removeUnwantedDiscoUpdatesFromFullNetmapUpdate makes a pass over the full
// set of peers in an update, usually only received when getting a full netmap
// from control at startup. If the pass finds a peer whose disco key is stale
// compared to a newer key the local netmap learned via TSMP, it overwrites
// the update with the TSMP-learned key (and the locally recorded LastSeen).
func (ms *mapSession) removeUnwantedDiscoUpdatesFromFullNetmapUpdate(resp *tailcfg.MapResponse) {
ms.peersMu.RLock()
defer ms.peersMu.RUnlock()
if len(resp.Peers) == 0 {
return
}
for _, peer := range resp.Peers {
if peer.DiscoKey.IsZero() {
continue
}
// Accept if:
// - peer is new
existingNode, ok := ms.peers[peer.ID]
if !ok {
continue
}
// Accept if:
// - disco key has not changed
if existingNode.DiscoKey() == peer.DiscoKey {
continue
}
// Accept if:
// - key has changed but peer is online
if peer.Online != nil && *peer.Online {
continue
}
// Accept if:
// - there's no last seen on the existing node
existingLastSeen, ok := existingNode.LastSeen().GetOk()
if !ok {
continue
}
// Accept if:
// - the last seen reported by control is newer.
if peer.LastSeen != nil && peer.LastSeen.After(existingLastSeen) {
continue
}
// Overwrite the key and last seen in the full netmap update.
peer.DiscoKey = existingNode.DiscoKey()
if t, ok := existingNode.LastSeen().GetOk(); ok {
peer.LastSeen = new(t)
} else {
peer.LastSeen = nil
}
}
}
// updateStateFromResponse updates ms from resp. It takes ownership of resp.
func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) {
ms.updatePeersStateFromResponse(resp)
if resp.Node != nil {
ms.lastNode = resp.Node.View()
capSet := set.Set[tailcfg.NodeCapability]{}
for _, c := range resp.Node.Capabilities {
capSet.Add(c)
}
for c := range resp.Node.CapMap {
capSet.Add(c)
}
ms.lastCapSet = capSet
}
for _, up := range resp.UserProfiles {
ms.lastUserProfile[up.ID] = up.View()
}
// TODO(bradfitz): clean up old user profiles? maybe not worth it.
if dm := resp.DERPMap; dm != nil {
ms.vlogf("netmap: new map contains DERP map")
// Guard against the control server accidentally sending
// a nil region definition, which at least Headscale was
// observed to send.
for rid, r := range dm.Regions {
if r == nil {
delete(dm.Regions, rid)
}
}
// In the copy/v86 wasm environment with limited networking, if the
// control plane didn't pick our DERP home for us, do it ourselves and
// mark all but the lowest region as NoMeasureNoHome. For prod, this
// will be Region 1, NYC, a compromise between the US and Europe. But
// really the control plane should pick this. This is only a fallback.
if hostinfo.IsInVM86() {
numCanMeasure := 0
lowest := 0
for rid, r := range dm.Regions {
if !r.NoMeasureNoHome {
numCanMeasure++
if lowest == 0 || rid < lowest {
lowest = rid
}
}
}
if numCanMeasure > 1 {
for rid, r := range dm.Regions {
if rid != lowest {
r.NoMeasureNoHome = true
}
}
}
}
// Zero-valued fields in a DERPMap mean that we're not changing
// anything and are using the previous value(s).
if ldm := ms.lastDERPMap; ldm != nil {
if dm.Regions == nil {
dm.Regions = ldm.Regions
dm.OmitDefaultRegions = ldm.OmitDefaultRegions
}
if dm.HomeParams == nil {
dm.HomeParams = ldm.HomeParams
} else if oldhh := ldm.HomeParams; oldhh != nil {
// Propagate sub-fields of HomeParams
hh := dm.HomeParams
if hh.RegionScore == nil {
hh.RegionScore = oldhh.RegionScore
}
}
}
ms.lastDERPMap = dm
}
var packetFilterChanged bool
// Older way, one big blob:
if pf := resp.PacketFilter; pf != nil {
packetFilterChanged = true
mak.Set(&ms.namedPacketFilters, "base", views.SliceOf(pf))
}
// Newer way, named chunks:
if m := resp.PacketFilters; m != nil {
packetFilterChanged = true
if v, ok := m["*"]; ok && v == nil {
ms.namedPacketFilters = nil
}
for k, v := range m {
if k == "*" {
continue
}
if v != nil {
mak.Set(&ms.namedPacketFilters, k, views.SliceOf(v))
} else {
delete(ms.namedPacketFilters, k)
}
}
}
if packetFilterChanged {
var concat []tailcfg.FilterRule
for _, v := range slices.Sorted(maps.Keys(ms.namedPacketFilters)) {
concat = ms.namedPacketFilters[v].AppendTo(concat)
}
ms.lastPacketFilterRules = views.SliceOf(concat)
var err error
ms.lastParsedPacketFilter, err = filter.MatchesFromFilterRules(concat)
if err != nil {
ms.logf("parsePacketFilter: %v", err)
}
}
if c := resp.DNSConfig; c != nil {
ms.lastDNSConfig = c
}
if p := resp.SSHPolicy; p != nil {
ms.lastSSHPolicy = p
}
if v, ok := resp.CollectServices.Get(); ok {
ms.collectServices = v
}
if resp.Domain != "" {
ms.lastDomain = resp.Domain
}
if resp.DomainDataPlaneAuditLogID != "" {
ms.lastDomainAuditLogID = resp.DomainDataPlaneAuditLogID
}
if resp.Health != nil {
ms.lastHealth = resp.Health
}
if resp.DisplayMessages != nil {
if v, ok := resp.DisplayMessages["*"]; ok && v == nil {
ms.lastDisplayMessages = nil
}
for k, v := range resp.DisplayMessages {
if k == "*" {
continue
}
if v != nil {
mak.Set(&ms.lastDisplayMessages, k, *v)
} else {
delete(ms.lastDisplayMessages, k)
}
}
}
if resp.TKAInfo != nil {
ms.lastTKAInfo = resp.TKAInfo
}
}
var (
patchDERPRegion = clientmetric.NewCounter("controlclient_patch_derp")
patchEndpoints = clientmetric.NewCounter("controlclient_patch_endpoints")
patchCap = clientmetric.NewCounter("controlclient_patch_capver")
patchKey = clientmetric.NewCounter("controlclient_patch_key")
patchDiscoKey = clientmetric.NewCounter("controlclient_patch_discokey")
patchOnline = clientmetric.NewCounter("controlclient_patch_online")
patchLastSeen = clientmetric.NewCounter("controlclient_patch_lastseen")
patchKeyExpiry = clientmetric.NewCounter("controlclient_patch_keyexpiry")
patchCapMap = clientmetric.NewCounter("controlclient_patch_capmap")
patchKeySignature = clientmetric.NewCounter("controlclient_patch_keysig")
patchifiedPeer = clientmetric.NewCounter("controlclient_patchified_peer")
patchifiedPeerEqual = clientmetric.NewCounter("controlclient_patchified_peer_equal")
)
// updatePeersStateFromResponse updates ms.peers from resp.
// It takes ownership of resp.
func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (stats updateStats) {
ms.peersMu.Lock()
defer ms.peersMu.Unlock()
if ms.peers == nil {
ms.peers = make(map[tailcfg.NodeID]tailcfg.NodeView)
}
if len(resp.Peers) > 0 {
// Not delta encoded.
stats.allNew = true
keep := make(map[tailcfg.NodeID]bool, len(resp.Peers))
for _, n := range resp.Peers {
keep[n.ID] = true
lenBefore := len(ms.peers)
ms.peers[n.ID] = n.View()
if len(ms.peers) == lenBefore {
stats.changed++
} else {
stats.added++
}
}
for id := range ms.peers {
if !keep[id] {
stats.removed++
delete(ms.peers, id)
}
}
// Peers precludes all other delta operations so just return.
return
}
for _, id := range resp.PeersRemoved {
if _, ok := ms.peers[id]; ok {
delete(ms.peers, id)
stats.removed++
}
}
for _, n := range resp.PeersChanged {
lenBefore := len(ms.peers)
ms.peers[n.ID] = n.View()
if len(ms.peers) == lenBefore {
stats.changed++
} else {
stats.added++
}
}
for nodeID, seen := range resp.PeerSeenChange {
if vp, ok := ms.peers[nodeID]; ok {
mut := vp.AsStruct()
if seen {
mut.LastSeen = new(ms.clock().Now())
} else {
mut.LastSeen = nil
}
ms.peers[nodeID] = mut.View()
stats.changed++
}
}
for nodeID, online := range resp.OnlineChange {
if vp, ok := ms.peers[nodeID]; ok {
mut := vp.AsStruct()
mut.Online = new(online)
ms.peers[nodeID] = mut.View()
stats.changed++
}
}
for _, pc := range resp.PeersChangedPatch {
vp, ok := ms.peers[pc.NodeID]
if !ok {
continue
}
stats.changed++
mut := vp.AsStruct()
if pc.DERPRegion != 0 {
mut.HomeDERP = pc.DERPRegion
patchDERPRegion.Add(1)
}
if pc.Cap != 0 {
mut.Cap = pc.Cap
patchCap.Add(1)
}
if pc.Endpoints != nil {
mut.Endpoints = pc.Endpoints
patchEndpoints.Add(1)
}
if pc.Key != nil {
mut.Key = *pc.Key
patchKey.Add(1)
}
if pc.DiscoKey != nil {
mut.DiscoKey = *pc.DiscoKey
patchDiscoKey.Add(1)
}
if v := pc.Online; v != nil {
mut.Online = new(*v)
patchOnline.Add(1)
}
if v := pc.LastSeen; v != nil {
mut.LastSeen = new(*v)
patchLastSeen.Add(1)
}
if v := pc.KeyExpiry; v != nil {
mut.KeyExpiry = *v
patchKeyExpiry.Add(1)
}
if v := pc.KeySignature; v != nil {
mut.KeySignature = v
patchKeySignature.Add(1)
}
if v := pc.CapMap; v != nil {
mut.CapMap = v
patchCapMap.Add(1)
}
ms.peers[pc.NodeID] = mut.View()
}
return
}
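// addUserProfile copies the last-known profile for userID (if any) into
// nm.UserProfiles. The zero UserID and already-populated profiles are
// skipped.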
func (ms *mapSession) addUserProfile(nm *netmap.NetworkMap, userID tailcfg.UserID) {
if userID == 0 {
return
}
if _, dup := nm.UserProfiles[userID]; dup {
// Already populated it from a previous peer.
return
}
if up, ok := ms.lastUserProfile[userID]; ok {
nm.UserProfiles[userID] = up
}
}
var debugPatchifyPeer = envknob.RegisterBool("TS_DEBUG_PATCHIFY_PEER")
var debugPatchifyPeerMiss = envknob.RegisterBool("TS_DEBUG_PATCHIFY_PEER_MISS")
// patchifyMissOnFalse, if non-nil, is called with the field name when
// patchifyPeer fails. It is set by an init func in map_debug.go.
var patchifyMissOnFalse func(string)
// patchifyPeersChanged mutates resp to promote PeersChanged entries to PeersChangedPatch
// when possible.
func (ms *mapSession) patchifyPeersChanged(resp *tailcfg.MapResponse) {
var onFalse func(string)
if debugPatchifyPeerMiss() {
onFalse = patchifyMissOnFalse
}
filtered := resp.PeersChanged[:0]
for _, n := range resp.PeersChanged {
if p, ok := ms.patchifyPeer(n, onFalse); ok {
patchifiedPeer.Add(1)
if debugPatchifyPeer() {
patchj, _ := json.Marshal(p)
ms.logf("debug: patchifyPeer[ID=%v]: %s", n.ID, patchj)
}
if p != nil {
resp.PeersChangedPatch = append(resp.PeersChangedPatch, p)
} else {
patchifiedPeerEqual.Add(1)
}
} else {
filtered = append(filtered, n)
}
}
resp.PeersChanged = filtered
if len(resp.PeersChanged) == 0 {
resp.PeersChanged = nil
}
}
var nodeFields = sync.OnceValue(getNodeFields)
// getNodeFields returns the field names of tailcfg.Node.
func getNodeFields() []string {
rt := reflect.TypeFor[tailcfg.Node]()
ret := make([]string, 0, rt.NumField())
for i := range rt.NumField() {
ret = append(ret, rt.Field(i).Name)
}
return ret
}
// patchifyPeer returns a *tailcfg.PeerChange describing the change from the
// session's existing copy of the n.ID Node to n.
//
// It returns ok=false if a patch can't be made, (non-nil, true) if there is
// a delta, or (nil, true) if all the fields were identical (a zero change).
func (ms *mapSession) patchifyPeer(n *tailcfg.Node, onFalse func(string)) (_ *tailcfg.PeerChange, ok bool) {
ms.peersMu.RLock()
defer ms.peersMu.RUnlock()
was, ok := ms.peers[n.ID]
if !ok {
if onFalse != nil {
onFalse("peer_not_found")
}
return nil, false
}
return peerChangeDiff(was, n, onFalse)
}
// peerChangeDiff returns the difference from 'was' to 'n', if possible.
//
// It returns (nil, true) if the fields were identical.
func peerChangeDiff(was tailcfg.NodeView, n *tailcfg.Node, onFalse func(string)) (_ *tailcfg.PeerChange, ok bool) {
if onFalse == nil {
onFalse = func(string) {}
}
var ret *tailcfg.PeerChange
pc := func() *tailcfg.PeerChange {
if ret == nil {
ret = new(tailcfg.PeerChange)
}
return ret
}
for _, field := range nodeFields() {
switch field {
default:
// The whole point of using reflect in this function is to panic
// here in tests if we forget to handle a new field.
panic("unhandled field: " + field)
case "computedHostIfDifferent", "ComputedName", "ComputedNameWithHost":
// Caller's responsibility to have populated these.
continue
case "DataPlaneAuditLogID":
// Not sent for peers.
case "Capabilities":
// Deprecated; see https://github.com/tailscale/tailscale/issues/11508
// And it was never sent by any known control server.
case "ID":
if was.ID() != n.ID {
onFalse(field)
return nil, false
}
case "StableID":
if was.StableID() != n.StableID {
onFalse(field)
return nil, false
}
case "Name":
if was.Name() != n.Name {
onFalse(field)
return nil, false
}
case "User":
if was.User() != n.User {
onFalse(field)
return nil, false
}
case "Sharer":
if was.Sharer() != n.Sharer {
onFalse(field)
return nil, false
}
case "Key":
if was.Key() != n.Key {
pc().Key = new(n.Key)
}
case "KeyExpiry":
if !was.KeyExpiry().Equal(n.KeyExpiry) {
pc().KeyExpiry = new(n.KeyExpiry)
}
case "KeySignature":
if !was.KeySignature().Equal(n.KeySignature) {
pc().KeySignature = slices.Clone(n.KeySignature)
}
case "Machine":
if was.Machine() != n.Machine {
onFalse(field)
return nil, false
}
case "DiscoKey":
if was.DiscoKey() != n.DiscoKey {
pc().DiscoKey = new(n.DiscoKey)
}
case "Addresses":
if !views.SliceEqual(was.Addresses(), views.SliceOf(n.Addresses)) {
onFalse(field)
return nil, false
}
case "AllowedIPs":
if !views.SliceEqual(was.AllowedIPs(), views.SliceOf(n.AllowedIPs)) {
onFalse(field)
return nil, false
}
case "Endpoints":
if !views.SliceEqual(was.Endpoints(), views.SliceOf(n.Endpoints)) {
pc().Endpoints = slices.Clone(n.Endpoints)
}
case "LegacyDERPString":
if was.LegacyDERPString() != "" || n.LegacyDERPString != "" {
panic("unexpected; caller should've already called upgradeNode")
}
case "HomeDERP":
if was.HomeDERP() != n.HomeDERP {
pc().DERPRegion = n.HomeDERP
}
case "Hostinfo":
if !was.Hostinfo().Valid() && !n.Hostinfo.Valid() {
continue
}
if !was.Hostinfo().Valid() || !n.Hostinfo.Valid() {
onFalse(field)
return nil, false
}
if !was.Hostinfo().Equal(n.Hostinfo) {
onFalse(field)
return nil, false
}
case "Created":
if !was.Created().Equal(n.Created) {
onFalse(field)
return nil, false
}
case "Cap":
if was.Cap() != n.Cap {
pc().Cap = n.Cap
}
case "CapMap":
if len(n.CapMap) != was.CapMap().Len() {
// If they have different lengths, they're different.
if n.CapMap == nil {
pc().CapMap = make(tailcfg.NodeCapMap)
} else {
pc().CapMap = maps.Clone(n.CapMap)
}
} else {
// If they have the same length, check that all their keys
// have the same values.
for k, v := range was.CapMap().All() {
nv, ok := n.CapMap[k]
if !ok || !views.SliceEqual(v, views.SliceOf(nv)) {
pc().CapMap = maps.Clone(n.CapMap)
break
}
}
}
case "Tags":
if !views.SliceEqual(was.Tags(), views.SliceOf(n.Tags)) {
onFalse(field)
return nil, false
}
case "PrimaryRoutes":
if !views.SliceEqual(was.PrimaryRoutes(), views.SliceOf(n.PrimaryRoutes)) {
onFalse(field)
return nil, false
}
case "Online":
if wasOnline, ok := was.Online().GetOk(); ok && n.Online != nil && *n.Online != wasOnline {
pc().Online = new(*n.Online)
}
case "LastSeen":
if wasSeen, ok := was.LastSeen().GetOk(); ok && n.LastSeen != nil && !wasSeen.Equal(*n.LastSeen) {
pc().LastSeen = new(*n.LastSeen)
}
case "MachineAuthorized":
if was.MachineAuthorized() != n.MachineAuthorized {
onFalse(field)
return nil, false
}
case "UnsignedPeerAPIOnly":
if was.UnsignedPeerAPIOnly() != n.UnsignedPeerAPIOnly {
onFalse(field)
return nil, false
}
case "IsWireGuardOnly":
if was.IsWireGuardOnly() != n.IsWireGuardOnly {
onFalse(field)
return nil, false
}
case "IsJailed":
if was.IsJailed() != n.IsJailed {
onFalse(field)
return nil, false
}
case "Expired":
if was.Expired() != n.Expired {
onFalse(field)
return nil, false
}
case "SelfNodeV4MasqAddrForThisPeer":
va, vb := was.SelfNodeV4MasqAddrForThisPeer(), n.SelfNodeV4MasqAddrForThisPeer
if !va.Valid() && vb == nil {
continue
}
if va, ok := va.GetOk(); !ok || vb == nil || va != *vb {
onFalse(field)
return nil, false
}
case "SelfNodeV6MasqAddrForThisPeer":
va, vb := was.SelfNodeV6MasqAddrForThisPeer(), n.SelfNodeV6MasqAddrForThisPeer
if !va.Valid() && vb == nil {
continue
}
if va, ok := va.GetOk(); !ok || vb == nil || va != *vb {
onFalse(field)
return nil, false
}
case "ExitNodeDNSResolvers":
va, vb := was.ExitNodeDNSResolvers(), views.SliceOfViews(n.ExitNodeDNSResolvers)
if va.Len() != vb.Len() {
onFalse(field)
return nil, false
}
for i := range va.Len() {
if !va.At(i).Equal(vb.At(i)) {
onFalse(field)
return nil, false
}
}
}
}
if ret != nil {
ret.NodeID = n.ID
}
return ret, true
}
// PeerIDAndKeyByTailscaleIP returns the node ID and node Key from the peers
// map without touching the netmap itself. The implementation mirrors the
// implementation of [netmap.PeerByTailscaleIP].
func (ms *mapSession) PeerIDAndKeyByTailscaleIP(ip netip.Addr) (tailcfg.NodeID, key.NodePublic, bool) {
ms.peersMu.RLock()
defer ms.peersMu.RUnlock()
for _, n := range ms.peers {
ad := n.Addresses()
for i := range ad.Len() {
a := ad.At(i)
if a.Addr() == ip {
return n.ID(), n.Key(), true
}
}
}
return 0, key.NodePublic{}, false
}
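// sortedPeers returns the session's peers as a slice sorted by node ID.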
func (ms *mapSession) sortedPeers() []tailcfg.NodeView {
ms.peersMu.RLock()
defer ms.peersMu.RUnlock()
ret := slicesx.MapValues(ms.peers)
slices.SortFunc(ret, func(a, b tailcfg.NodeView) int {
return cmp.Compare(a.ID(), b.ID())
})
return ret
}
// netmap returns a fully populated NetworkMap from the last state seen from
// a call to updateStateFromResponse, filling in omitted
// information from prior MapResponse values.
func (ms *mapSession) netmap() *netmap.NetworkMap {
peerViews := ms.sortedPeers()
var msgs map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage
if len(ms.lastDisplayMessages) != 0 {
msgs = ms.lastDisplayMessages
} else if len(ms.lastHealth) > 0 {
// Convert all ms.lastHealth to the new [netmap.NetworkMap.DisplayMessages]
for _, h := range ms.lastHealth {
id := "health-" + strhash(h) // Unique ID in case there is more than one health message
mak.Set(&msgs, tailcfg.DisplayMessageID(id), tailcfg.DisplayMessage{
Title: "Coordination server reports an issue",
Severity: tailcfg.SeverityMedium,
Text: "The coordination server is reporting a health issue: " + h,
})
}
}
nm := &netmap.NetworkMap{
NodeKey: ms.publicNodeKey,
MachineKey: ms.machinePubKey,
Peers: peerViews,
UserProfiles: make(map[tailcfg.UserID]tailcfg.UserProfileView),
Domain: ms.lastDomain,
DomainAuditLogID: ms.lastDomainAuditLogID,
DNS: *ms.lastDNSConfig,
PacketFilter: ms.lastParsedPacketFilter,
PacketFilterRules: ms.lastPacketFilterRules,
SSHPolicy: ms.lastSSHPolicy,
CollectServices: ms.collectServices,
DERPMap: ms.lastDERPMap,
DisplayMessages: msgs,
TKAEnabled: ms.lastTKAInfo != nil && !ms.lastTKAInfo.Disabled,
}
if ms.lastTKAInfo != nil && ms.lastTKAInfo.Head != "" {
if err := nm.TKAHead.UnmarshalText([]byte(ms.lastTKAInfo.Head)); err != nil {
ms.logf("error unmarshalling TKAHead: %v", err)
nm.TKAEnabled = false
}
}
if node := ms.lastNode; node.Valid() {
nm.SelfNode = node
nm.AllCaps = ms.lastCapSet
}
ms.addUserProfile(nm, nm.User())
for _, peer := range peerViews {
ms.addUserProfile(nm, peer.Sharer())
ms.addUserProfile(nm, peer.User())
}
if DevKnob.ForceProxyDNS() {
nm.DNS.Proxied = true
}
return nm
}
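// strhash returns the hex-encoded SHA-256 of h, used as a stable ID.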
func strhash(h string) string {
s := sha256.New()
io.WriteString(s, h)
return hex.EncodeToString(s.Sum(nil))
}