diff --git a/control/controlclient/map.go b/control/controlclient/map.go
index c8cbdbce5..116d3cebd 100644
--- a/control/controlclient/map.go
+++ b/control/controlclient/map.go
@@ -107,9 +107,10 @@ type mapSession struct {
 	changeQueueClosed bool
 	processQueue      sync.WaitGroup
 
-	// mu protects the peers map.
-	peersMu sync.RWMutex
-	peers   map[tailcfg.NodeID]tailcfg.NodeView
+	// peersMu protects the peers map and sortedPeersCache.
+	peersMu          sync.RWMutex
+	peers            map[tailcfg.NodeID]tailcfg.NodeView
+	sortedPeersCache []tailcfg.NodeView // lazily computed sorted view of peers; nil means dirty
 }
 
 // newMapSession returns a mostly unconfigured new mapSession.
@@ -678,6 +679,8 @@ func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (s
 	ms.peersMu.Lock()
 	defer ms.peersMu.Unlock()
 
+	ms.sortedPeersCache = nil // invalidate; peers are about to change
+
 	if ms.peers == nil {
 		ms.peers = make(map[tailcfg.NodeID]tailcfg.NodeView)
 	}
@@ -1082,12 +1085,24 @@ func (ms *mapSession) PeerIDAndKeyByTailscaleIP(ip netip.Addr) (tailcfg.NodeID,
 }
 
 func (ms *mapSession) sortedPeers() []tailcfg.NodeView {
 	ms.peersMu.RLock()
-	defer ms.peersMu.RUnlock()
+	if c := ms.sortedPeersCache; c != nil {
+		ms.peersMu.RUnlock()
+		return c
+	}
+	ms.peersMu.RUnlock()
+	// Cache miss; upgrade to write lock and rebuild.
+	ms.peersMu.Lock()
+	defer ms.peersMu.Unlock()
+	// Double-check after acquiring write lock.
+	if ms.sortedPeersCache != nil {
+		return ms.sortedPeersCache
+	}
 	ret := slicesx.MapValues(ms.peers)
 	slices.SortFunc(ret, func(a, b tailcfg.NodeView) int {
 		return cmp.Compare(a.ID(), b.ID())
 	})
+	ms.sortedPeersCache = ret // NOTE(review): shared with all callers until invalidated; callers must not mutate it
 	return ret
 }
 
diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go
index f057345d9..c40705652 100644
--- a/control/controlclient/map_test.go
+++ b/control/controlclient/map_test.go
@@ -1838,3 +1838,40 @@ func TestPeerIDAndKeyByTailscaleIP(t *testing.T) {
 		}
 	})
 }
+
+func BenchmarkSortedPeers(b *testing.B) {
+	const numPeers = 50_000
+	ctx := context.Background()
+	nu := &countingNetmapUpdater{}
+	ms := newTestMapSession(b, nu)
+	ms.logf = func(string, ...any) {}
+	res := &tailcfg.MapResponse{
+		Node: &tailcfg.Node{ID: 1, Name: "foo.bar.ts.net."},
+	}
+	for i := range numPeers {
+		res.Peers = append(res.Peers, &tailcfg.Node{
+			ID:         tailcfg.NodeID(i + 2),
+			Name:       fmt.Sprintf("peer%d.bar.ts.net.", i),
+			Addresses:  []netip.Prefix{netip.MustParsePrefix(fmt.Sprintf("100.64.%d.%d/32", i/256, i%256))},
+			AllowedIPs: []netip.Prefix{netip.MustParsePrefix(fmt.Sprintf("100.64.%d.%d/32", i/256, i%256))},
+			Hostinfo:   (&tailcfg.Hostinfo{OS: "linux"}).View(),
+		})
+	}
+	ms.HandleNonKeepAliveMapResponse(ctx, res)
+
+	b.Run("repeated", func(b *testing.B) {
+		b.ReportAllocs()
+		for range b.N {
+			ms.sortedPeers()
+		}
+	})
+	b.Run("after_change", func(b *testing.B) {
+		b.ReportAllocs()
+		for range b.N {
+			ms.updatePeersStateFromResponse(&tailcfg.MapResponse{
+				OnlineChange: map[tailcfg.NodeID]bool{2: true},
+			})
+			ms.sortedPeers()
+		}
+	})
+}