mirror of
https://github.com/tailscale/tailscale.git
synced 2026-05-04 19:56:35 +02:00
wgengine/magicsock,ipn/ipnlocal: store and load homeDERP from cache (#19491)
With netmap caching, the home DERP of the self node was neither saved to the cache nor loaded from it, making nodes not stick to a DERP when starting without a connection to control. Instead, make sure that when a cache is available, load that cache before looking for DERP servers. This is implemented by allowing a skip of ReSTUN in setting the DERP map (we must have a DERP map before setting the home DERP), so the DERP from cache will set itself and be sticky until a connection to control is established. Making DERP only change when connected to control is handled by existing code from f072d017bd8241675aa946a27fc1827f570435cb. Updates #19490 Signed-off-by: Claus Lensbøl <claus@tailscale.com>
This commit is contained in:
parent
1841a93ab2
commit
78627c132f
@ -35,7 +35,19 @@ func (b *LocalBackend) writeNetmapToDiskLocked(nm *netmap.NetworkMap) error {
|
||||
b.diskCache.cache = netmapcache.NewCache(netmapcache.FileStore(dir))
|
||||
b.diskCache.dir = dir
|
||||
}
|
||||
return b.diskCache.cache.Store(b.currentNode().Context(), nm)
|
||||
|
||||
// Set the homeDERP on the self node before saving. The self node's homeDERP
// is generally unused, since the homeDERP for self is stored in magicsock,
// but we reuse the existing field so the value can be restored when loading
// the cache.
|
||||
|
||||
// Make a shallow copy and mutate a copy of the selfNode.
|
||||
nmCopy := *nm
|
||||
selfNode := nm.SelfNode.AsStruct()
|
||||
selfNode.HomeDERP = int(b.currentNode().homeDERP.Load())
|
||||
nmCopy.SelfNode = selfNode.View()
|
||||
|
||||
return b.diskCache.cache.Store(b.currentNode().Context(), &nmCopy)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) loadDiskCacheLocked() (om *netmap.NetworkMap, ok bool) {
|
||||
|
||||
229
ipn/ipnlocal/diskcache_test.go
Normal file
229
ipn/ipnlocal/diskcache_test.go
Normal file
@ -0,0 +1,229 @@
|
||||
// Copyright (c) Tailscale Inc & contributors
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"testing"
|
||||
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/types/netmap"
|
||||
"tailscale.com/util/eventbus"
|
||||
"tailscale.com/wgengine/magicsock"
|
||||
)
|
||||
|
||||
// newCacheTestNetmap returns a minimal valid netmap suitable for testing disk
|
||||
// cache operations.
|
||||
func newCacheTestNetmap() *netmap.NetworkMap {
|
||||
return &netmap.NetworkMap{
|
||||
SelfNode: (&tailcfg.Node{
|
||||
Name: "test-node.ts.net",
|
||||
User: tailcfg.UserID(1),
|
||||
Addresses: []netip.Prefix{
|
||||
netip.MustParsePrefix("100.64.0.1/32"),
|
||||
},
|
||||
}).View(),
|
||||
UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{
|
||||
tailcfg.UserID(1): (&tailcfg.UserProfile{
|
||||
LoginName: "user@example.com",
|
||||
DisplayName: "Test User",
|
||||
}).View(),
|
||||
},
|
||||
DERPMap: &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
1: {},
|
||||
2: {},
|
||||
3: {},
|
||||
4: {},
|
||||
5: {},
|
||||
6: {},
|
||||
7: {},
|
||||
8: {},
|
||||
9: {},
|
||||
10: {},
|
||||
11: {},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteAndLoadHomeDERP(t *testing.T) {
|
||||
b := newTestBackend(t)
|
||||
|
||||
nm := newCacheTestNetmap()
|
||||
b.currentNode().SetNetMap(nm)
|
||||
|
||||
const wantDERP = 7
|
||||
b.currentNode().homeDERP.Store(wantDERP)
|
||||
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if err := b.writeNetmapToDiskLocked(nm); err != nil {
|
||||
t.Fatalf("writeNetmapToDiskLocked: %v", err)
|
||||
}
|
||||
|
||||
loaded, ok := b.loadDiskCacheLocked()
|
||||
if !ok {
|
||||
t.Fatal("loadDiskCacheLocked returned ok=false")
|
||||
}
|
||||
if !loaded.SelfNode.Valid() {
|
||||
t.Fatal("loaded netmap SelfNode is invalid")
|
||||
}
|
||||
if got := loaded.SelfNode.HomeDERP(); got != wantDERP {
|
||||
t.Errorf("loaded SelfNode.HomeDERP() = %d, want %d", got, wantDERP)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOnHomeDERPUpdate(t *testing.T) {
|
||||
t.Run("normal_derp_change", func(t *testing.T) {
|
||||
b := newTestBackend(t)
|
||||
done := make(chan struct{})
|
||||
tstest.Replace(t, &testOnlyHomeDERPUpdate, func() { close(done) })
|
||||
|
||||
nm := newCacheTestNetmap()
|
||||
b.currentNode().SetNetMap(nm)
|
||||
|
||||
// Publish a HomeDERPChanged event via the backend's event bus.
|
||||
bus := b.Sys().Bus.Get()
|
||||
ec := bus.Client("test.TestOnHomeDERPUpdate")
|
||||
pub := eventbus.Publish[magicsock.HomeDERPChanged](ec)
|
||||
|
||||
const wantDERP = 11
|
||||
pub.Publish(magicsock.HomeDERPChanged{Old: 0, New: wantDERP})
|
||||
<-done
|
||||
|
||||
if got := b.currentNode().homeDERP.Load(); got != wantDERP {
|
||||
t.Errorf("b.homeDERP = %d, want %d", got, wantDERP)
|
||||
}
|
||||
|
||||
// Verify the value was persisted to the disk cache.
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
loaded, ok := b.loadDiskCacheLocked()
|
||||
if !ok {
|
||||
t.Fatal("loadDiskCacheLocked returned ok=false after homeDERP update")
|
||||
}
|
||||
if got := loaded.SelfNode.HomeDERP(); got != wantDERP {
|
||||
t.Errorf("cached SelfNode.HomeDERP() = %d, want %d", got, wantDERP)
|
||||
}
|
||||
})
|
||||
t.Run("old_does_not_match", func(t *testing.T) {
|
||||
b := newTestBackend(t)
|
||||
done := make(chan struct{})
|
||||
tstest.Replace(t, &testOnlyHomeDERPUpdate, func() { close(done) })
|
||||
|
||||
const setDERP = 11
|
||||
const wantDERP = 4
|
||||
|
||||
nm := newCacheTestNetmap()
|
||||
selfNode := nm.SelfNode.AsStruct()
|
||||
selfNode.HomeDERP = wantDERP
|
||||
nm.SelfNode = selfNode.View()
|
||||
b.currentNode().SetNetMap(nm)
|
||||
b.currentNode().homeDERP.Store(wantDERP)
|
||||
|
||||
// Write an initial cache entry so we can verify it is not overwritten.
|
||||
b.mu.Lock()
|
||||
if err := b.writeNetmapToDiskLocked(nm); err != nil {
|
||||
b.mu.Unlock()
|
||||
t.Fatalf("setup writeNetmapToDiskLocked: %v", err)
|
||||
}
|
||||
b.mu.Unlock()
|
||||
|
||||
// Publish a HomeDERPChanged event via the backend's event bus.
|
||||
bus := b.Sys().Bus.Get()
|
||||
ec := bus.Client("test.TestOnHomeDERPUpdate")
|
||||
pub := eventbus.Publish[magicsock.HomeDERPChanged](ec)
|
||||
pub.Publish(magicsock.HomeDERPChanged{Old: wantDERP + 1, New: setDERP})
|
||||
<-done
|
||||
|
||||
if got := b.currentNode().homeDERP.Load(); got != wantDERP {
|
||||
t.Errorf("b.homeDERP = %d, wanted no change %d", got, wantDERP)
|
||||
}
|
||||
|
||||
// Verify the cache still exists and still holds the original value.
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
loaded, ok := b.loadDiskCacheLocked()
|
||||
if !ok {
|
||||
t.Fatal("loadDiskCacheLocked returned ok=false; expected cache to still exist")
|
||||
}
|
||||
if got := loaded.SelfNode.HomeDERP(); got != wantDERP {
|
||||
t.Errorf("cached SelfNode.HomeDERP() = %d after rejected event, want original %d", got, wantDERP)
|
||||
}
|
||||
})
|
||||
t.Run("new_does_not_exist_in_map", func(t *testing.T) {
|
||||
b := newTestBackend(t)
|
||||
done := make(chan struct{})
|
||||
tstest.Replace(t, &testOnlyHomeDERPUpdate, func() { close(done) })
|
||||
|
||||
const setDERP = 111
|
||||
const wantDERP = 4
|
||||
|
||||
nm := newCacheTestNetmap()
|
||||
selfNode := nm.SelfNode.AsStruct()
|
||||
selfNode.HomeDERP = wantDERP
|
||||
nm.SelfNode = selfNode.View()
|
||||
b.currentNode().SetNetMap(nm)
|
||||
b.currentNode().homeDERP.Store(wantDERP)
|
||||
|
||||
// Write an initial cache entry so we can verify it is not overwritten.
|
||||
b.mu.Lock()
|
||||
if err := b.writeNetmapToDiskLocked(nm); err != nil {
|
||||
b.mu.Unlock()
|
||||
t.Fatalf("setup writeNetmapToDiskLocked: %v", err)
|
||||
}
|
||||
b.mu.Unlock()
|
||||
|
||||
// Publish a HomeDERPChanged event via the backend's event bus.
|
||||
// Old matches the stored homeDERP so only the "new region not in map"
|
||||
// guard is exercised.
|
||||
bus := b.Sys().Bus.Get()
|
||||
ec := bus.Client("test.TestOnHomeDERPUpdate")
|
||||
pub := eventbus.Publish[magicsock.HomeDERPChanged](ec)
|
||||
pub.Publish(magicsock.HomeDERPChanged{Old: wantDERP, New: setDERP})
|
||||
<-done
|
||||
|
||||
if got := b.currentNode().homeDERP.Load(); got != wantDERP {
|
||||
t.Errorf("b.homeDERP = %d, wanted no change %d", got, wantDERP)
|
||||
}
|
||||
|
||||
// Verify the cache still exists and still holds the original value.
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
loaded, ok := b.loadDiskCacheLocked()
|
||||
if !ok {
|
||||
t.Fatal("loadDiskCacheLocked returned ok=false; expected cache to still exist")
|
||||
}
|
||||
if got := loaded.SelfNode.HomeDERP(); got != wantDERP {
|
||||
t.Errorf("cached SelfNode.HomeDERP() = %d after rejected event, want original %d", got, wantDERP)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestWriteNetmapDoesNotMutateOriginal(t *testing.T) {
|
||||
b := newTestBackend(t)
|
||||
|
||||
nm := newCacheTestNetmap()
|
||||
b.currentNode().SetNetMap(nm)
|
||||
|
||||
originalDERP := nm.SelfNode.HomeDERP() // expected to be 0 initially
|
||||
|
||||
const storeDERP = 5
|
||||
b.currentNode().homeDERP.Store(storeDERP)
|
||||
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if err := b.writeNetmapToDiskLocked(nm); err != nil {
|
||||
t.Fatalf("writeNetmapToDiskLocked: %v", err)
|
||||
}
|
||||
|
||||
// The original netmap must not have been mutated.
|
||||
if got := nm.SelfNode.HomeDERP(); got != originalDERP {
|
||||
t.Errorf("original nm.SelfNode.HomeDERP() = %d after write, want %d (original was mutated)", got, originalDERP)
|
||||
}
|
||||
}
|
||||
@ -627,6 +627,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
|
||||
}
|
||||
eventbus.SubscribeFunc(ec, b.onAppConnectorRouteUpdate)
|
||||
eventbus.SubscribeFunc(ec, b.onAppConnectorStoreRoutes)
|
||||
eventbus.SubscribeFunc(ec, b.onHomeDERPUpdate)
|
||||
mConn.SetNetInfoCallback(b.setNetInfo) // TODO(tailscale/tailscale#17887): move to eventbus
|
||||
|
||||
return b, nil
|
||||
@ -658,6 +659,51 @@ func (b *LocalBackend) onAppConnectorStoreRoutes(ri appctype.RouteInfo) {
|
||||
}
|
||||
}
|
||||
|
||||
// testOnlyHomeDERPUpdate if non-nil is called after setting home DERP and
|
||||
// writing netmap to disk.
|
||||
var testOnlyHomeDERPUpdate func()
|
||||
|
||||
func (b *LocalBackend) onHomeDERPUpdate(du magicsock.HomeDERPChanged) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
b.onHomeDERPUpdateLocked(du)
|
||||
|
||||
if testOnlyHomeDERPUpdate != nil {
|
||||
testOnlyHomeDERPUpdate()
|
||||
}
|
||||
}
|
||||
|
||||
// onHomeDERPUpdateLocked considitonally updates the homeDERP for use in the
|
||||
// netmap cache.
|
||||
// If we switched our currentNode by switching profiles, we might be trying
|
||||
// to update the homeDERP from another profile. If the old homeDERP does not
|
||||
// match what we expect, don't swap the homeDERP.
|
||||
// In practice, it is possible that one profile with a homeDERP of 0 (no-derp)
|
||||
// got switched before setting any home DERP or that DERP IDs match across
|
||||
// DERP maps. Since the risk of this happening is small and the consequences
|
||||
// of this is is just a possible less optimal DERP until the next reSTUN,
|
||||
// accept this possibility.
|
||||
func (b *LocalBackend) onHomeDERPUpdateLocked(du magicsock.HomeDERPChanged) {
|
||||
cn := b.currentNode()
|
||||
|
||||
if cn == nil || cn.DERPMap() == nil || cn.DERPMap().Regions == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if _, ok := cn.DERPMap().Regions[du.New]; !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if !cn.homeDERP.CompareAndSwap(int64(du.Old), int64(du.New)) {
|
||||
return
|
||||
}
|
||||
|
||||
if err := b.writeNetmapToDiskLocked(b.NetMap()); err != nil {
|
||||
b.logf("write netmap to cache: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *LocalBackend) Clock() tstime.Clock { return b.clock }
|
||||
func (b *LocalBackend) Sys() *tsd.System { return b.sys }
|
||||
|
||||
@ -1821,7 +1867,18 @@ func (b *LocalBackend) setControlClientStatusLocked(c controlclient.Client, st c
|
||||
}
|
||||
|
||||
b.e.SetNetworkMap(st.NetMap)
|
||||
b.MagicConn().SetDERPMap(st.NetMap.DERPMap)
|
||||
b.MagicConn().SetDERPMap(st.NetMap.DERPMap, false)
|
||||
if c == nil && st.NetMap.Cached && st.NetMap.SelfNode.Valid() {
|
||||
// Loading from a cached netmap (c == nil means no live control
|
||||
// client). Pre-seed the home DERP from the cached self node so
|
||||
// that the guard in maybeSetNearestDERP prevents changing the
|
||||
// DERP home before we reconnect to the control plane. If the cache has
|
||||
// nothing in it, skip this, and let the node pick a DERP itself.
|
||||
if cachedHome := st.NetMap.SelfNode.HomeDERP(); cachedHome != 0 {
|
||||
b.health.SetOutOfPollNetMap()
|
||||
b.MagicConn().ForceSetNearestDERP(cachedHome)
|
||||
}
|
||||
}
|
||||
b.MagicConn().SetOnlyTCP443(st.NetMap.HasCap(tailcfg.NodeAttrOnlyTCP443))
|
||||
|
||||
// Update our cached DERP map
|
||||
@ -3388,7 +3445,7 @@ func (b *LocalBackend) DebugForceNetmapUpdate() {
|
||||
nm := b.currentNode().NetMap()
|
||||
b.e.SetNetworkMap(nm)
|
||||
if nm != nil {
|
||||
b.MagicConn().SetDERPMap(nm.DERPMap)
|
||||
b.MagicConn().SetDERPMap(nm.DERPMap, true)
|
||||
}
|
||||
b.setNetMapLocked(nm)
|
||||
}
|
||||
@ -4846,7 +4903,7 @@ func (b *LocalBackend) setPrefsLocked(newp *ipn.Prefs) ipn.PrefsView {
|
||||
}
|
||||
|
||||
if netMap != nil {
|
||||
b.MagicConn().SetDERPMap(netMap.DERPMap)
|
||||
b.MagicConn().SetDERPMap(netMap.DERPMap, true)
|
||||
}
|
||||
|
||||
if !oldp.WantRunning() && newp.WantRunning && cc != nil {
|
||||
@ -5208,7 +5265,6 @@ func (b *LocalBackend) authReconfig() {
|
||||
//
|
||||
// b.mu must be held.
|
||||
func (b *LocalBackend) authReconfigLocked() {
|
||||
|
||||
if b.shutdownCalled {
|
||||
b.logf("[v1] authReconfig: skipping because in shutdown")
|
||||
return
|
||||
|
||||
@ -80,6 +80,13 @@ type nodeBackend struct {
|
||||
eventClient *eventbus.Client
|
||||
derpMapViewPub *eventbus.Publisher[tailcfg.DERPMapView]
|
||||
|
||||
// homeDERP lives here temporarily. as long as mapSession is short lived, we
|
||||
// don't have a location delivering netmaps to local backend that knows our
|
||||
// homeDERP hence why it is cached here for now.
|
||||
// TODO(cmol): move this field into a refactored mapSession that is not
|
||||
// short lived.
|
||||
homeDERP atomic.Int64
|
||||
|
||||
// TODO(nickkhyl): maybe use sync.RWMutex?
|
||||
mu syncs.Mutex // protects the following fields
|
||||
|
||||
|
||||
@ -156,8 +156,8 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip.
|
||||
})
|
||||
|
||||
// Not using DERP in this test (for now?).
|
||||
s1.MagicSock.Get().SetDERPMap(&tailcfg.DERPMap{})
|
||||
s2.MagicSock.Get().SetDERPMap(&tailcfg.DERPMap{})
|
||||
s1.MagicSock.Get().SetDERPMap(&tailcfg.DERPMap{}, true)
|
||||
s2.MagicSock.Get().SetDERPMap(&tailcfg.DERPMap{}, true)
|
||||
|
||||
wait.Wait()
|
||||
}
|
||||
|
||||
@ -102,6 +102,7 @@ type activeDerp struct {
|
||||
|
||||
var (
|
||||
pickDERPFallbackForTests func() int
|
||||
reSTUNHookForTests func(why string)
|
||||
)
|
||||
|
||||
// pickDERPFallback returns a non-zero but deterministic DERP node to
|
||||
@ -155,7 +156,7 @@ var checkControlHealthDuringNearestDERPInTests = false
|
||||
// region that it selected and set (via setNearestDERP).
|
||||
//
|
||||
// c.mu must NOT be held.
|
||||
func (c *Conn) maybeSetNearestDERP(report *netcheck.Report) (preferredDERP int) {
|
||||
func (c *Conn) maybeSetNearestDERP(report *netcheck.Report, force bool) (preferredDERP int) {
|
||||
// Don't change our PreferredDERP if we don't have a connection to
|
||||
// control; if we don't, then we can't inform peers about a DERP home
|
||||
// change, which breaks all connectivity. Even if this DERP region is
|
||||
@ -169,7 +170,10 @@ func (c *Conn) maybeSetNearestDERP(report *netcheck.Report) (preferredDERP int)
|
||||
//
|
||||
// Despite the above behaviour, ensure that we set the nearest DERP if
|
||||
// we don't currently have one set; any DERP server is better than
|
||||
// none, even if not connected to control.
|
||||
// none, even if not connected to control. The exception here is if we have
|
||||
// a cached netmap with a previous DERP server. Retaining the previous DERP
|
||||
// makes it easier for other nodes to find each other when control is not
|
||||
// available.
|
||||
var connectedToControl bool
|
||||
if testenv.InTest() && !checkControlHealthDuringNearestDERPInTests {
|
||||
connectedToControl = true
|
||||
@ -179,7 +183,7 @@ func (c *Conn) maybeSetNearestDERP(report *netcheck.Report) (preferredDERP int)
|
||||
c.mu.Lock()
|
||||
myDerp := c.myDerp
|
||||
c.mu.Unlock()
|
||||
if !connectedToControl {
|
||||
if !connectedToControl && !force {
|
||||
if myDerp != 0 {
|
||||
metricDERPHomeNoChangeNoControl.Add(1)
|
||||
return myDerp
|
||||
@ -198,15 +202,32 @@ func (c *Conn) maybeSetNearestDERP(report *netcheck.Report) (preferredDERP int)
|
||||
}
|
||||
if preferredDERP != myDerp {
|
||||
c.logf(
|
||||
"magicsock: home DERP changing from derp-%d [%dms] to derp-%d [%dms]",
|
||||
c.myDerp, report.RegionLatency[myDerp].Milliseconds(), preferredDERP, report.RegionLatency[preferredDERP].Milliseconds())
|
||||
"magicsock: home DERP changing from derp-%d [%dms] to derp-%d [%dms] (forced=%t)",
|
||||
c.myDerp, report.RegionLatency[myDerp].Milliseconds(), preferredDERP, report.RegionLatency[preferredDERP].Milliseconds(), force)
|
||||
}
|
||||
if !c.setNearestDERP(preferredDERP) {
|
||||
preferredDERP = 0
|
||||
} else if preferredDERP != myDerp {
|
||||
c.homeDERPChangedPub.Publish(HomeDERPChanged{Old: myDerp, New: preferredDERP})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// HomeDERPChanged is an event sent on the [eventbus.Bus] when a new home DERP
// server has been selected. Its publisher is [magicsock.Conn]; its main
// subscriber is [ipnlocal.LocalBackend], which updates the homeDERP used by
// the netmap cache.
//
// TODO(cmol): Move the subscriber to not inject into localBackend, but rather
// into the netmap at the controlClient mapSession level once there is a stable
// abstraction to use.
type HomeDERPChanged struct {
	// Old and New are the previous and newly selected home DERP region IDs.
	Old, New int
}
|
||||
|
||||
func (c *Conn) ForceSetNearestDERP(regionID int) int {
|
||||
return c.maybeSetNearestDERP(&netcheck.Report{PreferredDERP: regionID}, true)
|
||||
}
|
||||
|
||||
func (c *Conn) derpRegionCodeLocked(regionID int) string {
|
||||
if c.derpMap == nil {
|
||||
return ""
|
||||
@ -771,7 +792,11 @@ func (c *Conn) SetOnlyTCP443(v bool) {
|
||||
|
||||
// SetDERPMap controls which (if any) DERP servers are used.
|
||||
// A nil value means to disable DERP; it's disabled by default.
|
||||
func (c *Conn) SetDERPMap(dm *tailcfg.DERPMap) {
|
||||
//
|
||||
// If doReStun is false, the post setting ReSTUN is not performed.
|
||||
// This flag is used for setting the map from a cache to make it possible to
|
||||
// also set the homeDERP from cache.
|
||||
func (c *Conn) SetDERPMap(dm *tailcfg.DERPMap, doReStun bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
@ -828,8 +853,14 @@ func (c *Conn) SetDERPMap(dm *tailcfg.DERPMap) {
|
||||
}
|
||||
}
|
||||
|
||||
go c.ReSTUN("derp-map-update")
|
||||
if doReStun {
|
||||
if reSTUNHookForTests != nil {
|
||||
reSTUNHookForTests("derp-map-update")
|
||||
}
|
||||
go c.ReSTUN("derp-map-update")
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Conn) wantDerpLocked() bool { return c.derpMap != nil }
|
||||
|
||||
// c.mu must be held.
|
||||
|
||||
@ -4,9 +4,15 @@
|
||||
package magicsock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"tailscale.com/health"
|
||||
"tailscale.com/net/netcheck"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/util/eventbus"
|
||||
"tailscale.com/util/eventbus/eventbustest"
|
||||
)
|
||||
|
||||
func CheckDERPHeuristicTimes(t *testing.T) {
|
||||
@ -14,3 +20,111 @@ func CheckDERPHeuristicTimes(t *testing.T) {
|
||||
t.Errorf("PreferredDERPFrameTime too low; should be at least frameReceiveRecordRate")
|
||||
}
|
||||
}
|
||||
|
||||
func TestForceSetNearestDERP(t *testing.T) {
|
||||
derpMap := &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
7: {
|
||||
RegionID: 7,
|
||||
RegionCode: "test",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{
|
||||
Name: "7a",
|
||||
RegionID: 7,
|
||||
HostName: "derp7.test.unused",
|
||||
IPv4: "127.0.0.1",
|
||||
IPv6: "none",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Force the real control health check so we can verify force=true bypasses it.
|
||||
tstest.Replace(t, &checkControlHealthDuringNearestDERPInTests, true)
|
||||
|
||||
bus := eventbustest.NewBus(t)
|
||||
ht := health.NewTracker(bus)
|
||||
c := newConn(t.Logf)
|
||||
ec := bus.Client("magicsock.Conn.Test")
|
||||
c.eventClient = ec
|
||||
c.homeDERPChangedPub = eventbus.Publish[HomeDERPChanged](ec)
|
||||
c.eventBus = bus
|
||||
c.derpMap = derpMap
|
||||
c.health = ht
|
||||
|
||||
ht.SetOutOfPollNetMap()
|
||||
|
||||
tw := eventbustest.NewWatcher(t, bus)
|
||||
|
||||
got := c.ForceSetNearestDERP(7)
|
||||
if got != 7 {
|
||||
t.Fatalf("ForceSetNearestDERP(7) = %d, want 7", got)
|
||||
}
|
||||
if c.myDerp != 7 {
|
||||
t.Errorf("c.myDerp = %d after ForceSetNearestDERP, want 7", c.myDerp)
|
||||
}
|
||||
|
||||
if err := eventbustest.Expect(tw, func(e HomeDERPChanged) error {
|
||||
if e.Old != 0 || e.New != 7 {
|
||||
return fmt.Errorf("got HomeDERPChanged{Old:%d, New:%d}, want {Old:0, New:7}", e.Old, e.New)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Errorf("expected HomeDERPChanged event: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetDERPMapDoReStun(t *testing.T) {
|
||||
derpMap1 := &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
1: {
|
||||
RegionID: 1,
|
||||
RegionCode: "cph",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "1a", RegionID: 1, HostName: "cph.test.unused", IPv4: "127.0.0.1", IPv6: "none"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
derpMap2 := &tailcfg.DERPMap{
|
||||
Regions: map[int]*tailcfg.DERPRegion{
|
||||
2: {
|
||||
RegionID: 2,
|
||||
RegionCode: "inc",
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{Name: "2a", RegionID: 2, HostName: "inc.test.unused", IPv4: "127.0.0.1", IPv6: "none"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var reSTUNCalls int
|
||||
tstest.Replace(t, &reSTUNHookForTests, func(_ string) {
|
||||
reSTUNCalls++
|
||||
})
|
||||
|
||||
bus := eventbustest.NewBus(t)
|
||||
ht := health.NewTracker(bus)
|
||||
c := newConn(t.Logf)
|
||||
ec := bus.Client("magicsock.Conn.Test")
|
||||
c.eventClient = ec
|
||||
c.homeDERPChangedPub = eventbus.Publish[HomeDERPChanged](ec)
|
||||
c.eventBus = bus
|
||||
c.health = ht
|
||||
// With a zero private key and everHadKey=true, ReSTUN returns early without
|
||||
// spawning updateEndpoints.
|
||||
c.everHadKey = true
|
||||
|
||||
// Should not trigger a ReSTUN.
|
||||
c.SetDERPMap(derpMap1, false)
|
||||
if reSTUNCalls != 0 {
|
||||
t.Errorf("SetDERPMap(dm, doReStun=false): got %d ReSTUN calls, want 0", reSTUNCalls)
|
||||
}
|
||||
|
||||
// doReStun=true: should trigger a ReSTUN.
|
||||
c.SetDERPMap(derpMap2, true)
|
||||
if reSTUNCalls != 1 {
|
||||
t.Errorf("SetDERPMap(dm, doReStun=true): got %d ReSTUN calls, want 1", reSTUNCalls)
|
||||
}
|
||||
}
|
||||
|
||||
@ -183,6 +183,7 @@ type Conn struct {
|
||||
allocRelayEndpointPub *eventbus.Publisher[UDPRelayAllocReq]
|
||||
portUpdatePub *eventbus.Publisher[router.PortUpdate]
|
||||
tsmpDiscoKeyAvailablePub *eventbus.Publisher[NewDiscoKeyAvailable]
|
||||
homeDERPChangedPub *eventbus.Publisher[HomeDERPChanged]
|
||||
|
||||
// pconn4 and pconn6 are the underlying UDP sockets used to
|
||||
// send/receive packets for wireguard and other magicsock
|
||||
@ -657,6 +658,7 @@ func NewConn(opts Options) (*Conn, error) {
|
||||
c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](ec)
|
||||
c.portUpdatePub = eventbus.Publish[router.PortUpdate](ec)
|
||||
c.tsmpDiscoKeyAvailablePub = eventbus.Publish[NewDiscoKeyAvailable](ec)
|
||||
c.homeDERPChangedPub = eventbus.Publish[HomeDERPChanged](ec)
|
||||
eventbus.SubscribeFunc(ec, c.onPortMapChanged)
|
||||
eventbus.SubscribeFunc(ec, c.onUDPRelayAllocResp)
|
||||
|
||||
@ -1056,7 +1058,7 @@ func (c *Conn) updateNetInfo(ctx context.Context) (*netcheck.Report, error) {
|
||||
ni.OSHasIPv6.Set(report.OSHasIPv6)
|
||||
ni.WorkingUDP.Set(report.UDP)
|
||||
ni.WorkingICMPv4.Set(report.ICMPv4)
|
||||
ni.PreferredDERP = c.maybeSetNearestDERP(report)
|
||||
ni.PreferredDERP = c.maybeSetNearestDERP(report, false)
|
||||
ni.FirewallMode = hostinfo.FirewallMode()
|
||||
|
||||
c.callNetInfoCallback(ni)
|
||||
|
||||
@ -203,7 +203,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, ln nettype.PacketListe
|
||||
if err != nil {
|
||||
t.Fatalf("constructing magicsock: %v", err)
|
||||
}
|
||||
conn.SetDERPMap(derpMap)
|
||||
conn.SetDERPMap(derpMap, true)
|
||||
if err := conn.SetPrivateKey(privateKey); err != nil {
|
||||
t.Fatalf("setting private key in magicsock: %v", err)
|
||||
}
|
||||
@ -435,7 +435,7 @@ func TestNewConn(t *testing.T) {
|
||||
t.Fatal("LocalPort returned 0")
|
||||
}
|
||||
|
||||
conn.SetDERPMap(stuntest.DERPMapOf(stunAddr.String()))
|
||||
conn.SetDERPMap(stuntest.DERPMapOf(stunAddr.String()), true)
|
||||
conn.SetPrivateKey(key.NewNode())
|
||||
|
||||
go func() {
|
||||
@ -567,7 +567,7 @@ func TestDERPActiveFuncCalledAfterConnect(t *testing.T) {
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
conn.SetDERPMap(derpMap)
|
||||
conn.SetDERPMap(derpMap, true)
|
||||
if err := conn.SetPrivateKey(key.NewNode()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -3080,6 +3080,7 @@ func TestMaybeSetNearestDERP(t *testing.T) {
|
||||
old int
|
||||
reportDERP int
|
||||
connectedToControl bool
|
||||
force bool
|
||||
want int
|
||||
}{
|
||||
{
|
||||
@ -3103,6 +3104,22 @@ func TestMaybeSetNearestDERP(t *testing.T) {
|
||||
connectedToControl: false, // not connected...
|
||||
want: 21, // ... but want to change to new DERP
|
||||
},
|
||||
{
|
||||
name: "force_not_connected_with_report_derp",
|
||||
old: 1,
|
||||
reportDERP: 21,
|
||||
connectedToControl: false,
|
||||
force: true,
|
||||
want: 21, // force bypasses the no-change-without-control guard
|
||||
},
|
||||
{
|
||||
name: "force_not_connected_no_derp_no_current",
|
||||
old: 0,
|
||||
reportDERP: 0,
|
||||
connectedToControl: false,
|
||||
force: true,
|
||||
want: 31, // force + no report DERP → deterministic fallback
|
||||
},
|
||||
{
|
||||
name: "not_connected_with_fallback_and_no_current",
|
||||
old: 0, // no current DERP
|
||||
@ -3127,8 +3144,13 @@ func TestMaybeSetNearestDERP(t *testing.T) {
|
||||
}
|
||||
for _, tt := range testCases {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ht := health.NewTracker(eventbustest.NewBus(t))
|
||||
bus := eventbustest.NewBus(t)
|
||||
ht := health.NewTracker(bus)
|
||||
c := newConn(t.Logf)
|
||||
ec := bus.Client("magicsock.Conn.Test")
|
||||
c.eventClient = ec
|
||||
c.homeDERPChangedPub = eventbus.Publish[HomeDERPChanged](ec)
|
||||
c.eventBus = bus
|
||||
c.myDerp = tt.old
|
||||
c.derpMap = derpMap
|
||||
c.health = ht
|
||||
@ -3146,7 +3168,7 @@ func TestMaybeSetNearestDERP(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
got := c.maybeSetNearestDERP(report)
|
||||
got := c.maybeSetNearestDERP(report, tt.force)
|
||||
if got != tt.want {
|
||||
t.Errorf("got new DERP region %d, want %d", got, tt.want)
|
||||
}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user