derp/derpserver: split derp.Server out of derp into its own package

This exports a number of things from the derp (generic + client) package
for use by the new derpserver package, which cmd/derper now uses.

And then enough other miscellaneous changes to lock in that cmd/tailscaled can
be configured to not bring in tailscale.com/client/local. (The webclient in
particular was pulling it in even when disabled; that's now fixed.)

Fixes #17257

Change-Id: I88b6c7958643fb54f386dd900bddf73d2d4d96d5
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
Brad Fitzpatrick 2025-09-23 17:07:48 -07:00 committed by Brad Fitzpatrick
parent df747f1c1b
commit 21dc5f4e21
35 changed files with 1442 additions and 1319 deletions
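
For orientation, here is a minimal sketch (not part of this commit) of what a derper-style binary looks like after the split: the DERP server type and its HTTP handlers now come from the new tailscale.com/derp/derpserver package instead of derp and derp/derphttp. The mux paths mirror the ones registered in cmd/derper below; the listen address is made up for illustration.

package main

import (
	"log"
	"net/http"

	"tailscale.com/derp/derpserver"
	"tailscale.com/types/key"
)

func main() {
	// The server type moved from derp.Server to derpserver.Server.
	s := derpserver.NewServer(key.NewNode(), log.Printf)
	defer s.Close()

	// The HTTP handlers moved from derp/derphttp to derpserver.
	mux := http.NewServeMux()
	mux.Handle("/derp", derpserver.Handler(s))
	mux.HandleFunc("/derp/probe", derpserver.ProbeHandler)
	mux.HandleFunc("/derp/latency-check", derpserver.ProbeHandler)
	mux.HandleFunc("/generate_204", derpserver.ServeNoContent)

	log.Fatal(http.ListenAndServe("localhost:8080", mux))
}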

View File

@ -12,12 +12,12 @@ import (
"net/http"
"strings"
"tailscale.com/derp"
"tailscale.com/derp/derpserver"
"tailscale.com/net/connectproxy"
)
// serveConnect handles a CONNECT request for ACE support.
func serveConnect(s *derp.Server, w http.ResponseWriter, r *http.Request) {
func serveConnect(s *derpserver.Server, w http.ResponseWriter, r *http.Request) {
if !*flagACEEnabled {
http.Error(w, "CONNECT not enabled", http.StatusForbidden)
return

View File

@ -22,8 +22,8 @@ import (
"testing"
"time"
"tailscale.com/derp"
"tailscale.com/derp/derphttp"
"tailscale.com/derp/derpserver"
"tailscale.com/net/netmon"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
@ -131,9 +131,9 @@ func TestPinnedCertRawIP(t *testing.T) {
}
defer ln.Close()
ds := derp.NewServer(key.NewNode(), t.Logf)
ds := derpserver.NewServer(key.NewNode(), t.Logf)
derpHandler := derphttp.Handler(ds)
derpHandler := derpserver.Handler(ds)
mux := http.NewServeMux()
mux.Handle("/derp", derpHandler)

View File

@ -89,12 +89,13 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+
tailscale.com from tailscale.com/version
💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+
tailscale.com/client/local from tailscale.com/derp
tailscale.com/client/local from tailscale.com/derp/derpserver
tailscale.com/client/tailscale/apitype from tailscale.com/client/local
tailscale.com/derp from tailscale.com/cmd/derper+
tailscale.com/derp/derpconst from tailscale.com/derp+
tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+
tailscale.com/derp/derphttp from tailscale.com/cmd/derper
tailscale.com/disco from tailscale.com/derp
tailscale.com/derp/derpserver from tailscale.com/cmd/derper
tailscale.com/disco from tailscale.com/derp/derpserver
tailscale.com/drive from tailscale.com/client/local+
tailscale.com/envknob from tailscale.com/client/local+
tailscale.com/feature from tailscale.com/tsweb
@ -117,7 +118,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
tailscale.com/net/sockstats from tailscale.com/derp/derphttp
tailscale.com/net/stun from tailscale.com/net/stunserver
tailscale.com/net/stunserver from tailscale.com/cmd/derper
L tailscale.com/net/tcpinfo from tailscale.com/derp
L tailscale.com/net/tcpinfo from tailscale.com/derp/derpserver
tailscale.com/net/tlsdial from tailscale.com/derp/derphttp
tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial
tailscale.com/net/tsaddr from tailscale.com/ipn+
@ -132,7 +133,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
W tailscale.com/tsconst from tailscale.com/net/netmon+
tailscale.com/tstime from tailscale.com/derp+
tailscale.com/tstime/mono from tailscale.com/tstime/rate
tailscale.com/tstime/rate from tailscale.com/derp
tailscale.com/tstime/rate from tailscale.com/derp/derpserver
tailscale.com/tsweb from tailscale.com/cmd/derper+
tailscale.com/tsweb/promvarz from tailscale.com/cmd/derper
tailscale.com/tsweb/varz from tailscale.com/tsweb+
@ -167,7 +168,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
tailscale.com/util/multierr from tailscale.com/health+
tailscale.com/util/nocasemaps from tailscale.com/types/ipproto
tailscale.com/util/rands from tailscale.com/tsweb
tailscale.com/util/set from tailscale.com/derp+
tailscale.com/util/set from tailscale.com/derp/derpserver+
tailscale.com/util/singleflight from tailscale.com/net/dnscache
tailscale.com/util/slicesx from tailscale.com/cmd/derper+
tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting
@ -180,7 +181,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
tailscale.com/util/vizerror from tailscale.com/tailcfg+
W 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+
W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+
tailscale.com/version from tailscale.com/derp+
tailscale.com/version from tailscale.com/cmd/derper+
tailscale.com/version/distro from tailscale.com/envknob+
tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap
golang.org/x/crypto/acme from golang.org/x/crypto/acme/autocert

View File

@ -40,8 +40,7 @@ import (
"github.com/tailscale/setec/client/setec"
"golang.org/x/time/rate"
"tailscale.com/atomicfile"
"tailscale.com/derp"
"tailscale.com/derp/derphttp"
"tailscale.com/derp/derpserver"
"tailscale.com/metrics"
"tailscale.com/net/ktimeout"
"tailscale.com/net/stunserver"
@ -90,7 +89,7 @@ var (
// tcpUserTimeout is intentionally short, so that hung connections are cleaned up promptly. DERPs should be nearby users.
tcpUserTimeout = flag.Duration("tcp-user-timeout", 15*time.Second, "TCP user timeout")
// tcpWriteTimeout is the timeout for writing to client TCP connections. It does not apply to mesh connections.
tcpWriteTimeout = flag.Duration("tcp-write-timeout", derp.DefaultTCPWiteTimeout, "TCP write timeout; 0 results in no timeout being set on writes")
tcpWriteTimeout = flag.Duration("tcp-write-timeout", derpserver.DefaultTCPWiteTimeout, "TCP write timeout; 0 results in no timeout being set on writes")
// ACE
flagACEEnabled = flag.Bool("ace", false, "whether to enable embedded ACE server [experimental + in-development as of 2025-09-12; not yet documented]")
@ -189,7 +188,7 @@ func main() {
serveTLS := tsweb.IsProd443(*addr) || *certMode == "manual"
s := derp.NewServer(cfg.PrivateKey, log.Printf)
s := derpserver.NewServer(cfg.PrivateKey, log.Printf)
s.SetVerifyClient(*verifyClients)
s.SetTailscaledSocketPath(*socket)
s.SetVerifyClientURL(*verifyClientURL)
@ -256,7 +255,7 @@ func main() {
mux := http.NewServeMux()
if *runDERP {
derpHandler := derphttp.Handler(s)
derpHandler := derpserver.Handler(s)
derpHandler = addWebSocketSupport(s, derpHandler)
mux.Handle("/derp", derpHandler)
} else {
@ -267,8 +266,8 @@ func main() {
// These two endpoints are the same. Different versions of the clients
// have assumes different paths over time so we support both.
mux.HandleFunc("/derp/probe", derphttp.ProbeHandler)
mux.HandleFunc("/derp/latency-check", derphttp.ProbeHandler)
mux.HandleFunc("/derp/probe", derpserver.ProbeHandler)
mux.HandleFunc("/derp/latency-check", derpserver.ProbeHandler)
go refreshBootstrapDNSLoop()
mux.HandleFunc("/bootstrap-dns", tsweb.BrowserHeaderHandlerFunc(handleBootstrapDNS))
@ -280,7 +279,7 @@ func main() {
tsweb.AddBrowserHeaders(w)
io.WriteString(w, "User-agent: *\nDisallow: /\n")
}))
mux.Handle("/generate_204", http.HandlerFunc(derphttp.ServeNoContent))
mux.Handle("/generate_204", http.HandlerFunc(derpserver.ServeNoContent))
debug := tsweb.Debugger(mux)
debug.KV("TLS hostname", *hostname)
debug.KV("Mesh key", s.HasMeshKey())
@ -388,7 +387,7 @@ func main() {
if *httpPort > -1 {
go func() {
port80mux := http.NewServeMux()
port80mux.HandleFunc("/generate_204", derphttp.ServeNoContent)
port80mux.HandleFunc("/generate_204", derpserver.ServeNoContent)
port80mux.Handle("/", certManager.HTTPHandler(tsweb.Port80Handler{Main: mux}))
port80srv := &http.Server{
Addr: net.JoinHostPort(listenHost, fmt.Sprintf("%d", *httpPort)),

View File

@ -11,7 +11,7 @@ import (
"strings"
"testing"
"tailscale.com/derp/derphttp"
"tailscale.com/derp/derpserver"
"tailscale.com/tstest/deptest"
)
@ -78,20 +78,20 @@ func TestNoContent(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
req, _ := http.NewRequest("GET", "https://localhost/generate_204", nil)
if tt.input != "" {
req.Header.Set(derphttp.NoContentChallengeHeader, tt.input)
req.Header.Set(derpserver.NoContentChallengeHeader, tt.input)
}
w := httptest.NewRecorder()
derphttp.ServeNoContent(w, req)
derpserver.ServeNoContent(w, req)
resp := w.Result()
if tt.want == "" {
if h, found := resp.Header[derphttp.NoContentResponseHeader]; found {
if h, found := resp.Header[derpserver.NoContentResponseHeader]; found {
t.Errorf("got %+v; expected no response header", h)
}
return
}
if got := resp.Header.Get(derphttp.NoContentResponseHeader); got != tt.want {
if got := resp.Header.Get(derpserver.NoContentResponseHeader); got != tt.want {
t.Errorf("got %q; want %q", got, tt.want)
}
})

View File

@ -13,11 +13,12 @@ import (
"tailscale.com/derp"
"tailscale.com/derp/derphttp"
"tailscale.com/derp/derpserver"
"tailscale.com/net/netmon"
"tailscale.com/types/logger"
)
func startMesh(s *derp.Server) error {
func startMesh(s *derpserver.Server) error {
if *meshWith == "" {
return nil
}
@ -32,7 +33,7 @@ func startMesh(s *derp.Server) error {
return nil
}
func startMeshWithHost(s *derp.Server, hostTuple string) error {
func startMeshWithHost(s *derpserver.Server, hostTuple string) error {
var host string
var dialHost string
hostParts := strings.Split(hostTuple, "/")

View File

@ -11,14 +11,14 @@ import (
"strings"
"github.com/coder/websocket"
"tailscale.com/derp"
"tailscale.com/derp/derpserver"
"tailscale.com/net/wsconn"
)
var counterWebSocketAccepts = expvar.NewInt("derp_websocket_accepts")
// addWebSocketSupport returns a Handle wrapping base that adds WebSocket server support.
func addWebSocketSupport(s *derp.Server, base http.Handler) http.Handler {
func addWebSocketSupport(s *derpserver.Server, base http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
up := strings.ToLower(r.Header.Get("Upgrade"))

View File

@ -784,9 +784,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp
tailscale.com/control/controlknobs from tailscale.com/control/controlclient+
tailscale.com/derp from tailscale.com/derp/derphttp+
tailscale.com/derp/derpconst from tailscale.com/derp+
tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+
tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+
tailscale.com/disco from tailscale.com/derp+
tailscale.com/disco from tailscale.com/net/tstun+
tailscale.com/doctor from tailscale.com/ipn/ipnlocal
tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal
💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal
@ -839,7 +839,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/logtail from tailscale.com/control/controlclient+
tailscale.com/logtail/backoff from tailscale.com/control/controlclient+
tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+
tailscale.com/metrics from tailscale.com/derp+
tailscale.com/metrics from tailscale.com/health+
tailscale.com/net/ace from tailscale.com/control/controlhttp
tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+
💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock
@ -875,7 +875,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/net/socks5 from tailscale.com/tsnet
tailscale.com/net/sockstats from tailscale.com/control/controlclient+
tailscale.com/net/stun from tailscale.com/ipn/localapi+
L tailscale.com/net/tcpinfo from tailscale.com/derp
tailscale.com/net/tlsdial from tailscale.com/control/controlclient+
tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial
tailscale.com/net/tsaddr from tailscale.com/client/web+
@ -902,7 +901,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/tsnet from tailscale.com/cmd/k8s-operator+
tailscale.com/tstime from tailscale.com/cmd/k8s-operator+
tailscale.com/tstime/mono from tailscale.com/net/tstun+
tailscale.com/tstime/rate from tailscale.com/derp+
tailscale.com/tstime/rate from tailscale.com/wgengine/filter
tailscale.com/tsweb from tailscale.com/util/eventbus
tailscale.com/tsweb/varz from tailscale.com/util/usermetric+
tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal
@ -1217,7 +1216,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
math/big from crypto/dsa+
math/bits from compress/flate+
math/rand from github.com/google/go-cmp/cmp+
math/rand/v2 from tailscale.com/derp+
math/rand/v2 from crypto/ecdsa+
mime from github.com/prometheus/common/expfmt+
mime/multipart from github.com/go-openapi/swag+
mime/quotedprintable from mime/multipart

View File

@ -96,9 +96,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
tailscale.com/control/controlhttp from tailscale.com/cmd/tailscale/cli
tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp
tailscale.com/derp from tailscale.com/derp/derphttp+
tailscale.com/derp/derpconst from tailscale.com/derp+
tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+
tailscale.com/derp/derphttp from tailscale.com/net/netcheck
tailscale.com/disco from tailscale.com/derp
tailscale.com/drive from tailscale.com/client/local+
tailscale.com/envknob from tailscale.com/client/local+
tailscale.com/envknob/featureknob from tailscale.com/client/web
@ -119,7 +118,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
tailscale.com/ipn/ipnstate from tailscale.com/client/local+
tailscale.com/kube/kubetypes from tailscale.com/envknob
tailscale.com/licenses from tailscale.com/client/web+
tailscale.com/metrics from tailscale.com/derp+
tailscale.com/metrics from tailscale.com/health+
tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli+
tailscale.com/net/bakedroots from tailscale.com/net/tlsdial
tailscale.com/net/captivedetection from tailscale.com/net/netcheck
@ -138,7 +137,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+
tailscale.com/net/sockstats from tailscale.com/control/controlhttp+
tailscale.com/net/stun from tailscale.com/net/netcheck
L tailscale.com/net/tcpinfo from tailscale.com/derp
tailscale.com/net/tlsdial from tailscale.com/cmd/tailscale/cli+
tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial
tailscale.com/net/tsaddr from tailscale.com/client/web+
@ -153,7 +151,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
tailscale.com/tsconst from tailscale.com/net/netmon+
tailscale.com/tstime from tailscale.com/control/controlhttp+
tailscale.com/tstime/mono from tailscale.com/tstime/rate
tailscale.com/tstime/rate from tailscale.com/cmd/tailscale/cli+
tailscale.com/tstime/rate from tailscale.com/cmd/tailscale/cli
tailscale.com/tsweb from tailscale.com/util/eventbus
tailscale.com/tsweb/varz from tailscale.com/util/usermetric+
tailscale.com/types/dnstype from tailscale.com/tailcfg+
@ -193,7 +191,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli
tailscale.com/util/quarantine from tailscale.com/cmd/tailscale/cli
tailscale.com/util/rands from tailscale.com/tsweb
tailscale.com/util/set from tailscale.com/derp+
tailscale.com/util/set from tailscale.com/ipn+
tailscale.com/util/singleflight from tailscale.com/net/dnscache
tailscale.com/util/slicesx from tailscale.com/client/systray+
L tailscale.com/util/stringsx from tailscale.com/client/systray
@ -358,7 +356,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
encoding/pem from crypto/tls+
encoding/xml from github.com/godbus/dbus/v5/introspect+
errors from archive/tar+
expvar from tailscale.com/derp+
expvar from tailscale.com/health+
flag from github.com/peterbourgon/ff/v3+
fmt from archive/tar+
hash from compress/zlib+
@ -431,7 +429,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
math/big from crypto/dsa+
math/bits from compress/flate+
math/rand from github.com/mdlayher/netlink+
math/rand/v2 from tailscale.com/derp+
math/rand/v2 from crypto/ecdsa+
mime from golang.org/x/oauth2/internal+
mime/multipart from net/http
mime/quotedprintable from mime/multipart

View File

@ -256,9 +256,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp
tailscale.com/control/controlknobs from tailscale.com/control/controlclient+
tailscale.com/derp from tailscale.com/derp/derphttp+
tailscale.com/derp/derpconst from tailscale.com/derp+
tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+
tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+
tailscale.com/disco from tailscale.com/derp+
tailscale.com/disco from tailscale.com/feature/relayserver+
tailscale.com/doctor from tailscale.com/ipn/ipnlocal
tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal
💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal
@ -314,7 +314,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/logtail from tailscale.com/cmd/tailscaled+
tailscale.com/logtail/backoff from tailscale.com/cmd/tailscaled+
tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+
tailscale.com/metrics from tailscale.com/derp+
tailscale.com/metrics from tailscale.com/health+
tailscale.com/net/ace from tailscale.com/control/controlhttp
tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+
💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+
@ -349,7 +349,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/net/socks5 from tailscale.com/cmd/tailscaled
tailscale.com/net/sockstats from tailscale.com/control/controlclient+
tailscale.com/net/stun from tailscale.com/ipn/localapi+
L tailscale.com/net/tcpinfo from tailscale.com/derp
tailscale.com/net/tlsdial from tailscale.com/control/controlclient+
tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial
tailscale.com/net/tsaddr from tailscale.com/client/web+
@ -378,7 +377,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/tsd from tailscale.com/cmd/tailscaled+
tailscale.com/tstime from tailscale.com/control/controlclient+
tailscale.com/tstime/mono from tailscale.com/net/tstun+
tailscale.com/tstime/rate from tailscale.com/derp+
tailscale.com/tstime/rate from tailscale.com/wgengine/filter
tailscale.com/tsweb from tailscale.com/util/eventbus
tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+
tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal
@ -432,7 +431,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/util/racebuild from tailscale.com/logpolicy
tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+
tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock
tailscale.com/util/set from tailscale.com/derp+
tailscale.com/util/set from tailscale.com/control/controlclient+
tailscale.com/util/singleflight from tailscale.com/control/controlclient+
tailscale.com/util/slicesx from tailscale.com/appc+
tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy
@ -613,7 +612,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
encoding/pem from crypto/tls+
encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+
errors from archive/tar+
expvar from tailscale.com/derp+
expvar from tailscale.com/cmd/tailscaled+
flag from tailscale.com/cmd/tailscaled+
fmt from archive/tar+
hash from compress/zlib+

View File

@ -44,6 +44,17 @@ func TestOmitSyspolicy(t *testing.T) {
}.Check(t)
}
func TestOmitLocalClient(t *testing.T) {
deptest.DepChecker{
GOOS: "linux",
GOARCH: "amd64",
Tags: "ts_omit_webclient,ts_omit_relayserver,ts_omit_oauthkey,ts_omit_acme",
BadDeps: map[string]string{
"tailscale.com/client/local": "unexpected",
},
}.Check(t)
}
// Test that we can build a binary without reflect.MethodByName.
// See https://github.com/tailscale/tailscale/issues/17063
func TestOmitReflectThings(t *testing.T) {

View File

@ -30,7 +30,6 @@ import (
"syscall"
"time"
"tailscale.com/client/local"
"tailscale.com/cmd/tailscaled/childproc"
"tailscale.com/control/controlclient"
"tailscale.com/envknob"
@ -685,16 +684,17 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID
if root := lb.TailscaleVarRoot(); root != "" {
dnsfallback.SetCachePath(filepath.Join(root, "derpmap.cached.json"), logf)
}
lb.ConfigureWebClient(&local.Client{
Socket: args.socketpath,
UseSocketOnly: args.socketpath != paths.DefaultTailscaledSocket(),
})
if f, ok := hookConfigureWebClient.GetOk(); ok {
f(lb)
}
if err := ns.Start(lb); err != nil {
log.Fatalf("failed to start netstack: %v", err)
}
return lb, nil
}
var hookConfigureWebClient feature.Hook[func(*ipnlocal.LocalBackend)]
// createEngine tries to the wgengine.Engine based on the order of tunnels
// specified in the command line flags.
//

View File

@ -0,0 +1,21 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !ts_omit_webclient
package main
import (
"tailscale.com/client/local"
"tailscale.com/ipn/ipnlocal"
"tailscale.com/paths"
)
func init() {
hookConfigureWebClient.Set(func(lb *ipnlocal.LocalBackend) {
lb.ConfigureWebClient(&local.Client{
Socket: args.socketpath,
UseSocketOnly: args.socketpath != paths.DefaultTailscaledSocket(),
})
})
}

View File

@ -226,9 +226,9 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar
tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp
tailscale.com/control/controlknobs from tailscale.com/control/controlclient+
tailscale.com/derp from tailscale.com/derp/derphttp+
tailscale.com/derp/derpconst from tailscale.com/derp+
tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+
tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+
tailscale.com/disco from tailscale.com/derp+
tailscale.com/disco from tailscale.com/net/tstun+
tailscale.com/doctor from tailscale.com/ipn/ipnlocal
tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal
💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal
@ -270,7 +270,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar
tailscale.com/logtail from tailscale.com/control/controlclient+
tailscale.com/logtail/backoff from tailscale.com/control/controlclient+
tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+
tailscale.com/metrics from tailscale.com/derp+
tailscale.com/metrics from tailscale.com/health+
tailscale.com/net/ace from tailscale.com/control/controlhttp
tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+
💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock
@ -306,7 +306,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar
tailscale.com/net/socks5 from tailscale.com/tsnet
tailscale.com/net/sockstats from tailscale.com/control/controlclient+
tailscale.com/net/stun from tailscale.com/ipn/localapi+
L tailscale.com/net/tcpinfo from tailscale.com/derp
tailscale.com/net/tlsdial from tailscale.com/control/controlclient+
tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial
tailscale.com/net/tsaddr from tailscale.com/client/web+
@ -332,7 +331,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar
tailscale.com/tsnet from tailscale.com/cmd/tsidp
tailscale.com/tstime from tailscale.com/control/controlclient+
tailscale.com/tstime/mono from tailscale.com/net/tstun+
tailscale.com/tstime/rate from tailscale.com/derp+
tailscale.com/tstime/rate from tailscale.com/wgengine/filter
tailscale.com/tsweb from tailscale.com/util/eventbus
tailscale.com/tsweb/varz from tailscale.com/tsweb+
tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal
@ -566,7 +565,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar
encoding/pem from crypto/tls+
encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+
errors from archive/tar+
expvar from tailscale.com/derp+
expvar from tailscale.com/health+
flag from tailscale.com/cmd/tsidp+
fmt from archive/tar+
hash from compress/zlib+

derp/client_test.go (new file, 235 lines)
View File

@ -0,0 +1,235 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package derp
import (
"bufio"
"bytes"
"io"
"net"
"reflect"
"sync"
"testing"
"time"
"tailscale.com/tstest"
"tailscale.com/types/key"
)
type dummyNetConn struct {
net.Conn
}
func (dummyNetConn) SetReadDeadline(time.Time) error { return nil }
func TestClientRecv(t *testing.T) {
tests := []struct {
name string
input []byte
want any
}{
{
name: "ping",
input: []byte{
byte(FramePing), 0, 0, 0, 8,
1, 2, 3, 4, 5, 6, 7, 8,
},
want: PingMessage{1, 2, 3, 4, 5, 6, 7, 8},
},
{
name: "pong",
input: []byte{
byte(FramePong), 0, 0, 0, 8,
1, 2, 3, 4, 5, 6, 7, 8,
},
want: PongMessage{1, 2, 3, 4, 5, 6, 7, 8},
},
{
name: "health_bad",
input: []byte{
byte(FrameHealth), 0, 0, 0, 3,
byte('B'), byte('A'), byte('D'),
},
want: HealthMessage{Problem: "BAD"},
},
{
name: "health_ok",
input: []byte{
byte(FrameHealth), 0, 0, 0, 0,
},
want: HealthMessage{},
},
{
name: "server_restarting",
input: []byte{
byte(FrameRestarting), 0, 0, 0, 8,
0, 0, 0, 1,
0, 0, 0, 2,
},
want: ServerRestartingMessage{
ReconnectIn: 1 * time.Millisecond,
TryFor: 2 * time.Millisecond,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := &Client{
nc: dummyNetConn{},
br: bufio.NewReader(bytes.NewReader(tt.input)),
logf: t.Logf,
clock: &tstest.Clock{},
}
got, err := c.Recv()
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("got %#v; want %#v", got, tt.want)
}
})
}
}
func TestClientSendPing(t *testing.T) {
var buf bytes.Buffer
c := &Client{
bw: bufio.NewWriter(&buf),
}
if err := c.SendPing([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil {
t.Fatal(err)
}
want := []byte{
byte(FramePing), 0, 0, 0, 8,
1, 2, 3, 4, 5, 6, 7, 8,
}
if !bytes.Equal(buf.Bytes(), want) {
t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", buf.Bytes(), want)
}
}
func TestClientSendPong(t *testing.T) {
var buf bytes.Buffer
c := &Client{
bw: bufio.NewWriter(&buf),
}
if err := c.SendPong([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil {
t.Fatal(err)
}
want := []byte{
byte(FramePong), 0, 0, 0, 8,
1, 2, 3, 4, 5, 6, 7, 8,
}
if !bytes.Equal(buf.Bytes(), want) {
t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", buf.Bytes(), want)
}
}
func BenchmarkWriteUint32(b *testing.B) {
w := bufio.NewWriter(io.Discard)
b.ReportAllocs()
b.ResetTimer()
for range b.N {
writeUint32(w, 0x0ba3a)
}
}
type nopRead struct{}
func (r nopRead) Read(p []byte) (int, error) {
return len(p), nil
}
var sinkU32 uint32
func BenchmarkReadUint32(b *testing.B) {
r := bufio.NewReader(nopRead{})
var err error
b.ReportAllocs()
b.ResetTimer()
for range b.N {
sinkU32, err = readUint32(r)
if err != nil {
b.Fatal(err)
}
}
}
type countWriter struct {
mu sync.Mutex
writes int
bytes int64
}
func (w *countWriter) Write(p []byte) (n int, err error) {
w.mu.Lock()
defer w.mu.Unlock()
w.writes++
w.bytes += int64(len(p))
return len(p), nil
}
func (w *countWriter) Stats() (writes int, bytes int64) {
w.mu.Lock()
defer w.mu.Unlock()
return w.writes, w.bytes
}
func (w *countWriter) ResetStats() {
w.mu.Lock()
defer w.mu.Unlock()
w.writes, w.bytes = 0, 0
}
func TestClientSendRateLimiting(t *testing.T) {
cw := new(countWriter)
c := &Client{
bw: bufio.NewWriter(cw),
clock: &tstest.Clock{},
}
c.setSendRateLimiter(ServerInfoMessage{})
pkt := make([]byte, 1000)
if err := c.send(key.NodePublic{}, pkt); err != nil {
t.Fatal(err)
}
writes1, bytes1 := cw.Stats()
if writes1 != 1 {
t.Errorf("writes = %v, want 1", writes1)
}
// Flood should all succeed.
cw.ResetStats()
for range 1000 {
if err := c.send(key.NodePublic{}, pkt); err != nil {
t.Fatal(err)
}
}
writes1K, bytes1K := cw.Stats()
if writes1K != 1000 {
t.Logf("writes = %v; want 1000", writes1K)
}
if got, want := bytes1K, bytes1*1000; got != want {
t.Logf("bytes = %v; want %v", got, want)
}
// Set a rate limiter
cw.ResetStats()
c.setSendRateLimiter(ServerInfoMessage{
TokenBucketBytesPerSecond: 1,
TokenBucketBytesBurst: int(bytes1 * 2),
})
for range 1000 {
if err := c.send(key.NodePublic{}, pkt); err != nil {
t.Fatal(err)
}
}
writesLimited, bytesLimited := cw.Stats()
if writesLimited == 0 || writesLimited == writes1K {
t.Errorf("limited conn's write count = %v; want non-zero, less than 1k", writesLimited)
}
if bytesLimited < bytes1*2 || bytesLimited >= bytes1K {
t.Errorf("limited conn's bytes count = %v; want >=%v, <%v", bytesLimited, bytes1K*2, bytes1K)
}
}

View File

@ -27,15 +27,15 @@ import (
// including its on-wire framing overhead)
const MaxPacketSize = 64 << 10
// magic is the DERP magic number, sent in the frameServerKey frame
// Magic is the DERP Magic number, sent in the frameServerKey frame
// upon initial connection.
const magic = "DERP🔑" // 8 bytes: 0x44 45 52 50 f0 9f 94 91
const Magic = "DERP🔑" // 8 bytes: 0x44 45 52 50 f0 9f 94 91
const (
nonceLen = 24
frameHeaderLen = 1 + 4 // frameType byte + 4 byte length
keyLen = 32
maxInfoLen = 1 << 20
NonceLen = 24
FrameHeaderLen = 1 + 4 // frameType byte + 4 byte length
KeyLen = 32
MaxInfoLen = 1 << 20
)
// KeepAlive is the minimum frequency at which the DERP server sends
@ -48,10 +48,10 @@ const KeepAlive = 60 * time.Second
// - version 2: received packets have src addrs in frameRecvPacket at beginning
const ProtocolVersion = 2
// frameType is the one byte frame type at the beginning of the frame
// FrameType is the one byte frame type at the beginning of the frame
// header. The second field is a big-endian uint32 describing the
// length of the remaining frame (not including the initial 5 bytes).
type frameType byte
type FrameType byte
/*
Protocol flow:
@ -69,14 +69,14 @@ Steady state:
* server then sends frameRecvPacket to recipient
*/
const (
frameServerKey = frameType(0x01) // 8B magic + 32B public key + (0+ bytes future use)
frameClientInfo = frameType(0x02) // 32B pub key + 24B nonce + naclbox(json)
frameServerInfo = frameType(0x03) // 24B nonce + naclbox(json)
frameSendPacket = frameType(0x04) // 32B dest pub key + packet bytes
frameForwardPacket = frameType(0x0a) // 32B src pub key + 32B dst pub key + packet bytes
frameRecvPacket = frameType(0x05) // v0/1: packet bytes, v2: 32B src pub key + packet bytes
frameKeepAlive = frameType(0x06) // no payload, no-op (to be replaced with ping/pong)
frameNotePreferred = frameType(0x07) // 1 byte payload: 0x01 or 0x00 for whether this is client's home node
FrameServerKey = FrameType(0x01) // 8B magic + 32B public key + (0+ bytes future use)
FrameClientInfo = FrameType(0x02) // 32B pub key + 24B nonce + naclbox(json)
FrameServerInfo = FrameType(0x03) // 24B nonce + naclbox(json)
FrameSendPacket = FrameType(0x04) // 32B dest pub key + packet bytes
FrameForwardPacket = FrameType(0x0a) // 32B src pub key + 32B dst pub key + packet bytes
FrameRecvPacket = FrameType(0x05) // v0/1: packet bytes, v2: 32B src pub key + packet bytes
FrameKeepAlive = FrameType(0x06) // no payload, no-op (to be replaced with ping/pong)
FrameNotePreferred = FrameType(0x07) // 1 byte payload: 0x01 or 0x00 for whether this is client's home node
// framePeerGone is sent from server to client to signal that
// a previous sender is no longer connected. That is, if A
@ -85,7 +85,7 @@ const (
// exists on that connection to get back to A. It is also sent
// if A tries to send a CallMeMaybe to B and the server has no
// record of B
framePeerGone = frameType(0x08) // 32B pub key of peer that's gone + 1 byte reason
FramePeerGone = FrameType(0x08) // 32B pub key of peer that's gone + 1 byte reason
// framePeerPresent is like framePeerGone, but for other members of the DERP
// region when they're meshed up together.
@ -96,7 +96,7 @@ const (
// remaining after that, it's a PeerPresentFlags byte.
// While current servers send 41 bytes, old servers will send fewer, and newer
// servers might send more.
framePeerPresent = frameType(0x09)
FramePeerPresent = FrameType(0x09)
// frameWatchConns is how one DERP node in a regional mesh
// subscribes to the others in the region.
@ -104,30 +104,30 @@ const (
// is closed. Otherwise, the client is initially flooded with
// framePeerPresent for all connected nodes, and then a stream of
// framePeerPresent & framePeerGone has peers connect and disconnect.
frameWatchConns = frameType(0x10)
FrameWatchConns = FrameType(0x10)
// frameClosePeer is a privileged frame type (requires the
// mesh key for now) that closes the provided peer's
// connection. (To be used for cluster load balancing
// purposes, when clients end up on a non-ideal node)
frameClosePeer = frameType(0x11) // 32B pub key of peer to close.
FrameClosePeer = FrameType(0x11) // 32B pub key of peer to close.
framePing = frameType(0x12) // 8 byte ping payload, to be echoed back in framePong
framePong = frameType(0x13) // 8 byte payload, the contents of the ping being replied to
FramePing = FrameType(0x12) // 8 byte ping payload, to be echoed back in framePong
FramePong = FrameType(0x13) // 8 byte payload, the contents of the ping being replied to
// frameHealth is sent from server to client to tell the client
// if their connection is unhealthy somehow. Currently the only unhealthy state
// is whether the connection is detected as a duplicate.
// The entire frame body is the text of the error message. An empty message
// clears the error state.
frameHealth = frameType(0x14)
FrameHealth = FrameType(0x14)
// frameRestarting is sent from server to client for the
// server to declare that it's restarting. Payload is two big
// endian uint32 durations in milliseconds: when to reconnect,
// and how long to try total. See ServerRestartingMessage docs for
// more details on how the client should interpret them.
frameRestarting = frameType(0x15)
FrameRestarting = FrameType(0x15)
)
// PeerGoneReasonType is a one byte reason code explaining why a
@ -154,6 +154,18 @@ const (
PeerPresentNotIdeal = 1 << 3 // client said derp server is not its Region.Nodes[0] ideal node
)
// IdealNodeHeader is the HTTP request header sent on DERP HTTP client requests
// to indicate that they're connecting to their ideal (Region.Nodes[0]) node.
// The HTTP header value is the name of the node they wish they were connected
// to. This is an optional header.
const IdealNodeHeader = "Ideal-Node"
// FastStartHeader is the header (with value "1") that signals to the HTTP
// server that the DERP HTTP client does not want the HTTP 101 response
// headers and it will begin writing & reading the DERP protocol immediately
// following its HTTP request.
const FastStartHeader = "Derp-Fast-Start"
var bin = binary.BigEndian
func writeUint32(bw *bufio.Writer, v uint32) error {
@ -186,15 +198,24 @@ func readUint32(br *bufio.Reader) (uint32, error) {
return bin.Uint32(b[:]), nil
}
func readFrameTypeHeader(br *bufio.Reader, wantType frameType) (frameLen uint32, err error) {
gotType, frameLen, err := readFrameHeader(br)
// ReadFrameTypeHeader reads a frame header from br and
// verifies that the frame type matches wantType.
//
// If it does, it returns the frame length (not including
// the 5 byte header) and a nil error.
//
// If it doesn't, it returns an error and a zero length.
func ReadFrameTypeHeader(br *bufio.Reader, wantType FrameType) (frameLen uint32, err error) {
gotType, frameLen, err := ReadFrameHeader(br)
if err == nil && wantType != gotType {
err = fmt.Errorf("bad frame type 0x%X, want 0x%X", gotType, wantType)
}
return frameLen, err
}
func readFrameHeader(br *bufio.Reader) (t frameType, frameLen uint32, err error) {
// ReadFrameHeader reads the header of a DERP frame,
// reading 5 bytes from br.
func ReadFrameHeader(br *bufio.Reader) (t FrameType, frameLen uint32, err error) {
tb, err := br.ReadByte()
if err != nil {
return 0, 0, err
@ -203,7 +224,7 @@ func readFrameHeader(br *bufio.Reader) (t frameType, frameLen uint32, err error)
if err != nil {
return 0, 0, err
}
return frameType(tb), frameLen, nil
return FrameType(tb), frameLen, nil
}
// readFrame reads a frame header and then reads its payload into
@ -216,8 +237,8 @@ func readFrameHeader(br *bufio.Reader) (t frameType, frameLen uint32, err error)
// bytes are read, err will be io.ErrShortBuffer, and frameLen and t
// will both be set. That is, callers need to explicitly handle when
// they get more data than expected.
func readFrame(br *bufio.Reader, maxSize uint32, b []byte) (t frameType, frameLen uint32, err error) {
t, frameLen, err = readFrameHeader(br)
func readFrame(br *bufio.Reader, maxSize uint32, b []byte) (t FrameType, frameLen uint32, err error) {
t, frameLen, err = ReadFrameHeader(br)
if err != nil {
return 0, 0, err
}
@ -239,19 +260,26 @@ func readFrame(br *bufio.Reader, maxSize uint32, b []byte) (t frameType, frameLe
return t, frameLen, err
}
func writeFrameHeader(bw *bufio.Writer, t frameType, frameLen uint32) error {
// WriteFrameHeader writes a frame header to bw.
//
// The frame header is 5 bytes: a one byte frame type
// followed by a big-endian uint32 length of the
// remaining frame (not including the 5 byte header).
//
// It does not flush bw.
func WriteFrameHeader(bw *bufio.Writer, t FrameType, frameLen uint32) error {
if err := bw.WriteByte(byte(t)); err != nil {
return err
}
return writeUint32(bw, frameLen)
}
// writeFrame writes a complete frame & flushes it.
func writeFrame(bw *bufio.Writer, t frameType, b []byte) error {
// WriteFrame writes a complete frame & flushes it.
func WriteFrame(bw *bufio.Writer, t FrameType, b []byte) error {
if len(b) > 10<<20 {
return errors.New("unreasonably large frame write")
}
if err := writeFrameHeader(bw, t, uint32(len(b))); err != nil {
if err := WriteFrameHeader(bw, t, uint32(len(b))); err != nil {
return err
}
if _, err := bw.Write(b); err != nil {
@ -270,3 +298,12 @@ type Conn interface {
SetReadDeadline(time.Time) error
SetWriteDeadline(time.Time) error
}
// ServerInfo is the message sent from the server to clients during
// the connection setup.
type ServerInfo struct {
Version int `json:"version,omitempty"`
TokenBucketBytesPerSecond int `json:",omitempty"`
TokenBucketBytesBurst int `json:",omitempty"`
}
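
The newly exported frame helpers above compose as follows — a minimal sketch (not part of the commit) that writes a ping frame into a buffer and reads its header back, using only identifiers introduced in this file:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"log"

	"tailscale.com/derp"
)

func main() {
	var buf bytes.Buffer
	bw := bufio.NewWriter(&buf)

	// WriteFrame writes the 5-byte header (FrameType + big-endian length)
	// followed by the payload, then flushes bw.
	payload := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	if err := derp.WriteFrame(bw, derp.FramePing, payload); err != nil {
		log.Fatal(err)
	}

	// ReadFrameTypeHeader checks the frame type and returns the payload length.
	br := bufio.NewReader(&buf)
	frameLen, err := derp.ReadFrameTypeHeader(br, derp.FramePing)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("ping frame with %d payload bytes\n", frameLen)
}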

View File

@ -133,17 +133,17 @@ func (c *Client) recvServerKey() error {
if err != nil {
return err
}
if flen < uint32(len(buf)) || t != frameServerKey || string(buf[:len(magic)]) != magic {
if flen < uint32(len(buf)) || t != FrameServerKey || string(buf[:len(Magic)]) != Magic {
return errors.New("invalid server greeting")
}
c.serverKey = key.NodePublicFromRaw32(mem.B(buf[len(magic):]))
c.serverKey = key.NodePublicFromRaw32(mem.B(buf[len(Magic):]))
return nil
}
func (c *Client) parseServerInfo(b []byte) (*serverInfo, error) {
const maxLength = nonceLen + maxInfoLen
func (c *Client) parseServerInfo(b []byte) (*ServerInfo, error) {
const maxLength = NonceLen + MaxInfoLen
fl := len(b)
if fl < nonceLen {
if fl < NonceLen {
return nil, fmt.Errorf("short serverInfo frame")
}
if fl > maxLength {
@ -153,14 +153,16 @@ func (c *Client) parseServerInfo(b []byte) (*serverInfo, error) {
if !ok {
return nil, fmt.Errorf("failed to open naclbox from server key %s", c.serverKey)
}
info := new(serverInfo)
info := new(ServerInfo)
if err := json.Unmarshal(msg, info); err != nil {
return nil, fmt.Errorf("invalid JSON: %v", err)
}
return info, nil
}
type clientInfo struct {
// ClientInfo is the information a DERP client sends to the server
// about itself when it connects.
type ClientInfo struct {
// MeshKey optionally specifies a pre-shared key used by
// trusted clients. It's required to subscribe to the
// connection list & forward packets. It's empty for regular
@ -180,7 +182,7 @@ type clientInfo struct {
}
// Equal reports if two clientInfo values are equal.
func (c *clientInfo) Equal(other *clientInfo) bool {
func (c *ClientInfo) Equal(other *ClientInfo) bool {
if c == nil || other == nil {
return c == other
}
@ -191,7 +193,7 @@ func (c *clientInfo) Equal(other *clientInfo) bool {
}
func (c *Client) sendClientKey() error {
msg, err := json.Marshal(clientInfo{
msg, err := json.Marshal(ClientInfo{
Version: ProtocolVersion,
MeshKey: c.meshKey,
CanAckPings: c.canAckPings,
@ -202,10 +204,10 @@ func (c *Client) sendClientKey() error {
}
msgbox := c.privateKey.SealTo(c.serverKey, msg)
buf := make([]byte, 0, keyLen+len(msgbox))
buf := make([]byte, 0, KeyLen+len(msgbox))
buf = c.publicKey.AppendTo(buf)
buf = append(buf, msgbox...)
return writeFrame(c.bw, frameClientInfo, buf)
return WriteFrame(c.bw, FrameClientInfo, buf)
}
// ServerPublicKey returns the server's public key.
@ -230,12 +232,12 @@ func (c *Client) send(dstKey key.NodePublic, pkt []byte) (ret error) {
c.wmu.Lock()
defer c.wmu.Unlock()
if c.rate != nil {
pktLen := frameHeaderLen + key.NodePublicRawLen + len(pkt)
pktLen := FrameHeaderLen + key.NodePublicRawLen + len(pkt)
if !c.rate.AllowN(c.clock.Now(), pktLen) {
return nil // drop
}
}
if err := writeFrameHeader(c.bw, frameSendPacket, uint32(key.NodePublicRawLen+len(pkt))); err != nil {
if err := WriteFrameHeader(c.bw, FrameSendPacket, uint32(key.NodePublicRawLen+len(pkt))); err != nil {
return err
}
if _, err := c.bw.Write(dstKey.AppendTo(nil)); err != nil {
@ -264,7 +266,7 @@ func (c *Client) ForwardPacket(srcKey, dstKey key.NodePublic, pkt []byte) (err e
timer := c.clock.AfterFunc(5*time.Second, c.writeTimeoutFired)
defer timer.Stop()
if err := writeFrameHeader(c.bw, frameForwardPacket, uint32(keyLen*2+len(pkt))); err != nil {
if err := WriteFrameHeader(c.bw, FrameForwardPacket, uint32(KeyLen*2+len(pkt))); err != nil {
return err
}
if _, err := c.bw.Write(srcKey.AppendTo(nil)); err != nil {
@ -282,17 +284,17 @@ func (c *Client) ForwardPacket(srcKey, dstKey key.NodePublic, pkt []byte) (err e
func (c *Client) writeTimeoutFired() { c.nc.Close() }
func (c *Client) SendPing(data [8]byte) error {
return c.sendPingOrPong(framePing, data)
return c.sendPingOrPong(FramePing, data)
}
func (c *Client) SendPong(data [8]byte) error {
return c.sendPingOrPong(framePong, data)
return c.sendPingOrPong(FramePong, data)
}
func (c *Client) sendPingOrPong(typ frameType, data [8]byte) error {
func (c *Client) sendPingOrPong(typ FrameType, data [8]byte) error {
c.wmu.Lock()
defer c.wmu.Unlock()
if err := writeFrameHeader(c.bw, typ, 8); err != nil {
if err := WriteFrameHeader(c.bw, typ, 8); err != nil {
return err
}
if _, err := c.bw.Write(data[:]); err != nil {
@ -314,7 +316,7 @@ func (c *Client) NotePreferred(preferred bool) (err error) {
c.wmu.Lock()
defer c.wmu.Unlock()
if err := writeFrameHeader(c.bw, frameNotePreferred, 1); err != nil {
if err := WriteFrameHeader(c.bw, FrameNotePreferred, 1); err != nil {
return err
}
var b byte = 0x00
@ -332,7 +334,7 @@ func (c *Client) NotePreferred(preferred bool) (err error) {
func (c *Client) WatchConnectionChanges() error {
c.wmu.Lock()
defer c.wmu.Unlock()
if err := writeFrameHeader(c.bw, frameWatchConns, 0); err != nil {
if err := WriteFrameHeader(c.bw, FrameWatchConns, 0); err != nil {
return err
}
return c.bw.Flush()
@ -343,7 +345,7 @@ func (c *Client) WatchConnectionChanges() error {
func (c *Client) ClosePeer(target key.NodePublic) error {
c.wmu.Lock()
defer c.wmu.Unlock()
return writeFrame(c.bw, frameClosePeer, target.AppendTo(nil))
return WriteFrame(c.bw, FrameClosePeer, target.AppendTo(nil))
}
// ReceivedMessage represents a type returned by Client.Recv. Unless
@ -502,7 +504,7 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro
c.peeked = 0
}
t, n, err := readFrameHeader(c.br)
t, n, err := ReadFrameHeader(c.br)
if err != nil {
return nil, err
}
@ -533,7 +535,7 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro
switch t {
default:
continue
case frameServerInfo:
case FrameServerInfo:
// Server sends this at start-up. Currently unused.
// Just has a JSON message saying "version: 2",
// but the protocol seems extensible enough as-is without
@ -550,29 +552,29 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro
}
c.setSendRateLimiter(sm)
return sm, nil
case frameKeepAlive:
case FrameKeepAlive:
// A one-way keep-alive message that doesn't require an acknowledgement.
// This predated framePing/framePong.
return KeepAliveMessage{}, nil
case framePeerGone:
if n < keyLen {
case FramePeerGone:
if n < KeyLen {
c.logf("[unexpected] dropping short peerGone frame from DERP server")
continue
}
// Backward compatibility for the older peerGone without reason byte
reason := PeerGoneReasonDisconnected
if n > keyLen {
reason = PeerGoneReasonType(b[keyLen])
if n > KeyLen {
reason = PeerGoneReasonType(b[KeyLen])
}
pg := PeerGoneMessage{
Peer: key.NodePublicFromRaw32(mem.B(b[:keyLen])),
Peer: key.NodePublicFromRaw32(mem.B(b[:KeyLen])),
Reason: reason,
}
return pg, nil
case framePeerPresent:
case FramePeerPresent:
remain := b
chunk, remain, ok := cutLeadingN(remain, keyLen)
chunk, remain, ok := cutLeadingN(remain, KeyLen)
if !ok {
c.logf("[unexpected] dropping short peerPresent frame from DERP server")
continue
@ -600,17 +602,17 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro
msg.Flags = PeerPresentFlags(chunk[0])
return msg, nil
case frameRecvPacket:
case FrameRecvPacket:
var rp ReceivedPacket
if n < keyLen {
if n < KeyLen {
c.logf("[unexpected] dropping short packet from DERP server")
continue
}
rp.Source = key.NodePublicFromRaw32(mem.B(b[:keyLen]))
rp.Data = b[keyLen:n]
rp.Source = key.NodePublicFromRaw32(mem.B(b[:KeyLen]))
rp.Data = b[KeyLen:n]
return rp, nil
case framePing:
case FramePing:
var pm PingMessage
if n < 8 {
c.logf("[unexpected] dropping short ping frame")
@ -619,7 +621,7 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro
copy(pm[:], b[:])
return pm, nil
case framePong:
case FramePong:
var pm PongMessage
if n < 8 {
c.logf("[unexpected] dropping short ping frame")
@ -628,10 +630,10 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro
copy(pm[:], b[:])
return pm, nil
case frameHealth:
case FrameHealth:
return HealthMessage{Problem: string(b[:])}, nil
case frameRestarting:
case FrameRestarting:
var m ServerRestartingMessage
if n < 8 {
c.logf("[unexpected] dropping short server restarting frame")

File diff suppressed because it is too large.

View File

@ -522,7 +522,7 @@ func (c *Client) connect(ctx context.Context, caller string) (client *derp.Clien
// just to get routed into the server's HTTP Handler so it
// can Hijack the request, but we signal with a special header
// that we don't want to deal with its HTTP response.
req.Header.Set(fastStartHeader, "1") // suppresses the server's HTTP response
req.Header.Set(derp.FastStartHeader, "1") // suppresses the server's HTTP response
if err := req.Write(brw); err != nil {
return nil, 0, err
}

View File

@ -1,7 +1,7 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package derphttp
package derphttp_test
import (
"bytes"
@ -21,9 +21,12 @@ import (
"time"
"tailscale.com/derp"
"tailscale.com/derp/derphttp"
"tailscale.com/derp/derpserver"
"tailscale.com/net/netmon"
"tailscale.com/net/netx"
"tailscale.com/tailcfg"
"tailscale.com/tstest"
"tailscale.com/types/key"
)
@ -41,12 +44,12 @@ func TestSendRecv(t *testing.T) {
clientKeys = append(clientKeys, priv.Public())
}
s := derp.NewServer(serverPrivateKey, t.Logf)
s := derpserver.NewServer(serverPrivateKey, t.Logf)
defer s.Close()
httpsrv := &http.Server{
TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)),
Handler: Handler(s),
Handler: derpserver.Handler(s),
}
ln, err := net.Listen("tcp4", "localhost:0")
@ -65,7 +68,7 @@ func TestSendRecv(t *testing.T) {
}
}()
var clients []*Client
var clients []*derphttp.Client
var recvChs []chan []byte
done := make(chan struct{})
var wg sync.WaitGroup
@ -78,7 +81,7 @@ func TestSendRecv(t *testing.T) {
}()
for i := range numClients {
key := clientPrivateKeys[i]
c, err := NewClient(key, serverURL, t.Logf, netMon)
c, err := derphttp.NewClient(key, serverURL, t.Logf, netMon)
if err != nil {
t.Fatalf("client %d: %v", i, err)
}
@ -158,7 +161,7 @@ func TestSendRecv(t *testing.T) {
recvNothing(1)
}
func waitConnect(t testing.TB, c *Client) {
func waitConnect(t testing.TB, c *derphttp.Client) {
t.Helper()
if m, err := c.Recv(); err != nil {
t.Fatalf("client first Recv: %v", err)
@ -169,12 +172,12 @@ func waitConnect(t testing.TB, c *Client) {
func TestPing(t *testing.T) {
serverPrivateKey := key.NewNode()
s := derp.NewServer(serverPrivateKey, t.Logf)
s := derpserver.NewServer(serverPrivateKey, t.Logf)
defer s.Close()
httpsrv := &http.Server{
TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)),
Handler: Handler(s),
Handler: derpserver.Handler(s),
}
ln, err := net.Listen("tcp4", "localhost:0")
@ -193,7 +196,7 @@ func TestPing(t *testing.T) {
}
}()
c, err := NewClient(key.NewNode(), serverURL, t.Logf, netmon.NewStatic())
c, err := derphttp.NewClient(key.NewNode(), serverURL, t.Logf, netmon.NewStatic())
if err != nil {
t.Fatalf("NewClient: %v", err)
}
@ -221,11 +224,11 @@ func TestPing(t *testing.T) {
const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derp.Server) {
s = derp.NewServer(k, t.Logf)
func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derpserver.Server) {
s = derpserver.NewServer(k, t.Logf)
httpsrv := &http.Server{
TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)),
Handler: Handler(s),
Handler: derpserver.Handler(s),
}
ln, err := net.Listen("tcp4", "localhost:0")
@ -247,8 +250,8 @@ func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derp.S
return
}
func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string) (c *Client) {
c, err := NewClient(watcherPrivateKey, serverToWatchURL, t.Logf, netmon.NewStatic())
func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string) (c *derphttp.Client) {
c, err := derphttp.NewClient(watcherPrivateKey, serverToWatchURL, t.Logf, netmon.NewStatic())
if err != nil {
t.Fatal(err)
}
@ -260,30 +263,16 @@ func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToW
return
}
// breakConnection breaks the connection, which should trigger a reconnect.
func (c *Client) breakConnection(brokenClient *derp.Client) {
c.mu.Lock()
defer c.mu.Unlock()
if c.client != brokenClient {
return
}
if c.netConn != nil {
c.netConn.Close()
c.netConn = nil
}
c.client = nil
}
// Test that a watcher connection successfully reconnects and processes peer
// updates after a different thread breaks and reconnects the connection, while
// the watcher is waiting on recv().
func TestBreakWatcherConnRecv(t *testing.T) {
// TODO(bradfitz): use synctest + memnet instead
// Set the wait time before a retry after connection failure to be much lower.
// This needs to be early in the test, for defer to run right at the end after
// the DERP client has finished.
origRetryInterval := retryInterval
retryInterval = 50 * time.Millisecond
defer func() { retryInterval = origRetryInterval }()
tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond)
var wg sync.WaitGroup
// Make the watcher server
@ -301,11 +290,11 @@ func TestBreakWatcherConnRecv(t *testing.T) {
defer watcher.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
watcherChan := make(chan int, 1)
defer close(watcherChan)
errChan := make(chan error, 1)
defer close(errChan)
// Start the watcher thread (which connects to the watched server)
wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343
@ -320,7 +309,10 @@ func TestBreakWatcherConnRecv(t *testing.T) {
}
remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- }
notifyErr := func(err error) {
errChan <- err
select {
case errChan <- err:
case <-ctx.Done():
}
}
watcher.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr)
@ -345,7 +337,7 @@ func TestBreakWatcherConnRecv(t *testing.T) {
t.Fatalf("watcher did not process the peer update")
}
timer.Reset(5 * time.Second)
watcher.breakConnection(watcher.client)
watcher.BreakConnection(watcher)
// re-establish connection by sending a packet
watcher.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus"))
}
@ -357,12 +349,12 @@ func TestBreakWatcherConnRecv(t *testing.T) {
// updates after a different thread breaks and reconnects the connection, while
// the watcher is not waiting on recv().
func TestBreakWatcherConn(t *testing.T) {
// TODO(bradfitz): use synctest + memnet instead
// Set the wait time before a retry after connection failure to be much lower.
// This needs to be early in the test, for defer to run right at the end after
// the DERP client has finished.
origRetryInterval := retryInterval
retryInterval = 50 * time.Millisecond
defer func() { retryInterval = origRetryInterval }()
tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond)
var wg sync.WaitGroup
// Make the watcher server
@ -428,7 +420,7 @@ func TestBreakWatcherConn(t *testing.T) {
case <-timer.C:
t.Fatalf("watcher did not process the peer update")
}
watcher1.breakConnection(watcher1.client)
watcher1.BreakConnection(watcher1)
// re-establish connection by sending a packet
watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus"))
// signal that the breaker is done
@ -446,7 +438,7 @@ func noopRemove(derp.PeerGoneMessage) {}
func noopNotifyError(error) {}
func TestRunWatchConnectionLoopServeConnect(t *testing.T) {
defer func() { testHookWatchLookConnectResult = nil }()
defer derphttp.SetTestHookWatchLookConnectResult(nil)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
@ -461,7 +453,7 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) {
defer watcher.Close()
// Test connecting to ourselves, and that we get hung up on.
testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool {
derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool {
t.Helper()
if err != nil {
t.Fatalf("error connecting to server: %v", err)
@ -470,12 +462,12 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) {
t.Error("wanted self-connect; wasn't")
}
return false
}
})
watcher.RunWatchConnectionLoop(ctx, pub, t.Logf, noopAdd, noopRemove, noopNotifyError)
// Test connecting to the server with a zero value for ignoreServerKey,
// so we should always connect.
testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool {
derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool {
t.Helper()
if err != nil {
t.Fatalf("error connecting to server: %v", err)
@ -484,16 +476,14 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) {
t.Error("wanted normal connect; got self connect")
}
return false
}
})
watcher.RunWatchConnectionLoop(ctx, key.NodePublic{}, t.Logf, noopAdd, noopRemove, noopNotifyError)
}
// verify that the LocalAddr method doesn't acquire the mutex.
// See https://github.com/tailscale/tailscale/issues/11519
func TestLocalAddrNoMutex(t *testing.T) {
var c Client
c.mu.Lock()
defer c.mu.Unlock() // not needed in test but for symmetry
var c derphttp.Client
_, err := c.LocalAddr()
if got, want := fmt.Sprint(err), "client not connected"; got != want {
@ -502,7 +492,7 @@ func TestLocalAddrNoMutex(t *testing.T) {
}
func TestProbe(t *testing.T) {
h := Handler(nil)
h := derpserver.Handler(nil)
tests := []struct {
path string
@ -523,7 +513,7 @@ func TestProbe(t *testing.T) {
}
func TestNotifyError(t *testing.T) {
defer func() { testHookWatchLookConnectResult = nil }()
defer derphttp.SetTestHookWatchLookConnectResult(nil)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
@ -541,7 +531,7 @@ func TestNotifyError(t *testing.T) {
}))
defer watcher.Close()
testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool {
derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool {
t.Helper()
if err == nil {
t.Fatal("expected error connecting to server, got nil")
@ -550,7 +540,7 @@ func TestNotifyError(t *testing.T) {
t.Error("wanted normal connect; got self connect")
}
return false
}
})
errChan := make(chan error, 1)
notifyError := func(err error) {
@ -587,7 +577,7 @@ func TestManualDial(t *testing.T) {
region := slices.Sorted(maps.Keys(dm.Regions))[0]
netMon := netmon.NewStatic()
rc := NewRegionClient(key.NewNode(), t.Logf, netMon, func() *tailcfg.DERPRegion {
rc := derphttp.NewRegionClient(key.NewNode(), t.Logf, netMon, func() *tailcfg.DERPRegion {
return dm.Regions[region]
})
defer rc.Close()
@ -625,7 +615,7 @@ func TestURLDial(t *testing.T) {
}
}
netMon := netmon.NewStatic()
c, err := NewClient(key.NewNode(), "https://"+hostname+"/", t.Logf, netMon)
c, err := derphttp.NewClient(key.NewNode(), "https://"+hostname+"/", t.Logf, netMon)
defer c.Close()
if err := c.Connect(context.Background()); err != nil {

View File

@ -0,0 +1,24 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package derphttp
func SetTestHookWatchLookConnectResult(f func(connectError error, wasSelfConnect bool) (keepRunning bool)) {
testHookWatchLookConnectResult = f
}
// BreakConnection breaks the connection, which should trigger a reconnect.
func (c *Client) BreakConnection(brokenClient *Client) {
c.mu.Lock()
defer c.mu.Unlock()
if c.client != brokenClient.client {
return
}
if c.netConn != nil {
c.netConn.Close()
c.netConn = nil
}
c.client = nil
}
var RetryInterval = &retryInterval
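For reference, a minimal sketch (the test name, log lines, and timing are illustrative, not part of this change) of how an external derphttp test can drive reconnect behavior through these newly exported hooks, mirroring the test updates above:

package derphttp_test

import (
	"testing"
	"time"

	"tailscale.com/derp/derphttp"
	"tailscale.com/tstest"
)

func TestReconnectHooksSketch(t *testing.T) {
	// Shrink the client's retry delay for this test only; tstest.Replace
	// restores the original value when the test finishes.
	tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond)

	// Observe each watch-connection attempt. Returning false tells the
	// watch loop not to keep running after this result.
	derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool {
		t.Logf("connect attempt: err=%v selfConnect=%v", err, wasSelfConnect)
		return false
	})
	defer derphttp.SetTestHookWatchLookConnectResult(nil)
}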

View File

@ -1,7 +1,8 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package derp
// Package derpserver implements a DERP server.
package derpserver
// TODO(crawshaw): with predefined serverKey in clients and HMAC on packets we could skip TLS
@ -38,6 +39,7 @@ import (
"go4.org/mem"
"golang.org/x/sync/errgroup"
"tailscale.com/client/local"
"tailscale.com/derp"
"tailscale.com/derp/derpconst"
"tailscale.com/disco"
"tailscale.com/envknob"
@ -55,19 +57,15 @@ import (
"tailscale.com/version"
)
type Conn = derp.Conn
// verboseDropKeys is the set of destination public keys that should
// verbosely log whenever DERP drops a packet.
var verboseDropKeys = map[key.NodePublic]bool{}
// IdealNodeHeader is the HTTP request header sent on DERP HTTP client requests
// to indicate that they're connecting to their ideal (Region.Nodes[0]) node.
// The HTTP header value is the name of the node they wish they were connected
// to. This is an optional header.
const IdealNodeHeader = "Ideal-Node"
// IdealNodeContextKey is the context key used to pass the IdealNodeHeader value
// from the HTTP handler to the DERP server's Accept method.
var IdealNodeContextKey = ctxkey.New[string]("ideal-node", "")
var IdealNodeContextKey = ctxkey.New("ideal-node", "")
func init() {
keys := envknob.String("TS_DEBUG_VERBOSE_DROPS")
@ -620,7 +618,7 @@ func (s *Server) initMetacert() {
log.Fatal(err)
}
tmpl := &x509.Certificate{
SerialNumber: big.NewInt(ProtocolVersion),
SerialNumber: big.NewInt(derp.ProtocolVersion),
Subject: pkix.Name{
CommonName: derpconst.MetaCertCommonNamePrefix + s.publicKey.UntypedHexString(),
},
@ -724,7 +722,7 @@ func (s *Server) registerClient(c *sclient) {
// presence changed.
//
// s.mu must be held.
func (s *Server) broadcastPeerStateChangeLocked(peer key.NodePublic, ipPort netip.AddrPort, flags PeerPresentFlags, present bool) {
func (s *Server) broadcastPeerStateChangeLocked(peer key.NodePublic, ipPort netip.AddrPort, flags derp.PeerPresentFlags, present bool) {
for w := range s.watchers {
w.peerStateChange = append(w.peerStateChange, peerConnState{
peer: peer,
@ -868,7 +866,7 @@ func (s *Server) notePeerGoneFromRegionLocked(key key.NodePublic) {
// requestPeerGoneWriteLimited sends a request to write a "peer gone"
// frame, but only in reply to a disco packet, and only if we haven't
// sent one recently.
func (c *sclient) requestPeerGoneWriteLimited(peer key.NodePublic, contents []byte, reason PeerGoneReasonType) {
func (c *sclient) requestPeerGoneWriteLimited(peer key.NodePublic, contents []byte, reason derp.PeerGoneReasonType) {
if disco.LooksLikeDiscoWrapper(contents) != true {
return
}
@ -1010,7 +1008,7 @@ func (c *sclient) run(ctx context.Context) error {
c.startStatsLoop(sendCtx)
for {
ft, fl, err := readFrameHeader(c.br)
ft, fl, err := derp.ReadFrameHeader(c.br)
c.debugLogf("read frame type %d len %d err %v", ft, fl, err)
if err != nil {
if errors.Is(err, io.EOF) {
@ -1025,17 +1023,17 @@ func (c *sclient) run(ctx context.Context) error {
}
c.s.noteClientActivity(c)
switch ft {
case frameNotePreferred:
case derp.FrameNotePreferred:
err = c.handleFrameNotePreferred(ft, fl)
case frameSendPacket:
case derp.FrameSendPacket:
err = c.handleFrameSendPacket(ft, fl)
case frameForwardPacket:
case derp.FrameForwardPacket:
err = c.handleFrameForwardPacket(ft, fl)
case frameWatchConns:
case derp.FrameWatchConns:
err = c.handleFrameWatchConns(ft, fl)
case frameClosePeer:
case derp.FrameClosePeer:
err = c.handleFrameClosePeer(ft, fl)
case framePing:
case derp.FramePing:
err = c.handleFramePing(ft, fl)
default:
err = c.handleUnknownFrame(ft, fl)
@ -1046,12 +1044,12 @@ func (c *sclient) run(ctx context.Context) error {
}
}
func (c *sclient) handleUnknownFrame(ft frameType, fl uint32) error {
func (c *sclient) handleUnknownFrame(ft derp.FrameType, fl uint32) error {
_, err := io.CopyN(io.Discard, c.br, int64(fl))
return err
}
func (c *sclient) handleFrameNotePreferred(ft frameType, fl uint32) error {
func (c *sclient) handleFrameNotePreferred(ft derp.FrameType, fl uint32) error {
if fl != 1 {
return fmt.Errorf("frameNotePreferred wrong size")
}
@ -1063,7 +1061,7 @@ func (c *sclient) handleFrameNotePreferred(ft frameType, fl uint32) error {
return nil
}
func (c *sclient) handleFrameWatchConns(ft frameType, fl uint32) error {
func (c *sclient) handleFrameWatchConns(ft derp.FrameType, fl uint32) error {
if fl != 0 {
return fmt.Errorf("handleFrameWatchConns wrong size")
}
@ -1074,9 +1072,9 @@ func (c *sclient) handleFrameWatchConns(ft frameType, fl uint32) error {
return nil
}
func (c *sclient) handleFramePing(ft frameType, fl uint32) error {
func (c *sclient) handleFramePing(ft derp.FrameType, fl uint32) error {
c.s.gotPing.Add(1)
var m PingMessage
var m derp.PingMessage
if fl < uint32(len(m)) {
return fmt.Errorf("short ping: %v", fl)
}
@ -1101,8 +1099,8 @@ func (c *sclient) handleFramePing(ft frameType, fl uint32) error {
return err
}
func (c *sclient) handleFrameClosePeer(ft frameType, fl uint32) error {
if fl != keyLen {
func (c *sclient) handleFrameClosePeer(ft derp.FrameType, fl uint32) error {
if fl != derp.KeyLen {
return fmt.Errorf("handleFrameClosePeer wrong size")
}
if !c.canMesh {
@ -1135,7 +1133,7 @@ func (c *sclient) handleFrameClosePeer(ft frameType, fl uint32) error {
// handleFrameForwardPacket reads a "forward packet" frame from the client
// (which must be a trusted client, a peer in our mesh).
func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error {
func (c *sclient) handleFrameForwardPacket(ft derp.FrameType, fl uint32) error {
if !c.canMesh {
return fmt.Errorf("insufficient permissions")
}
@ -1162,7 +1160,7 @@ func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error {
if dstLen > 1 {
reason = dropReasonDupClient
} else {
c.requestPeerGoneWriteLimited(dstKey, contents, PeerGoneReasonNotHere)
c.requestPeerGoneWriteLimited(dstKey, contents, derp.PeerGoneReasonNotHere)
}
s.recordDrop(contents, srcKey, dstKey, reason)
return nil
@ -1178,7 +1176,7 @@ func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error {
}
// handleFrameSendPacket reads a "send packet" frame from the client.
func (c *sclient) handleFrameSendPacket(ft frameType, fl uint32) error {
func (c *sclient) handleFrameSendPacket(ft derp.FrameType, fl uint32) error {
s := c.s
dstKey, contents, err := s.recvPacket(c.br, fl)
@ -1215,7 +1213,7 @@ func (c *sclient) handleFrameSendPacket(ft frameType, fl uint32) error {
if dstLen > 1 {
reason = dropReasonDupClient
} else {
c.requestPeerGoneWriteLimited(dstKey, contents, PeerGoneReasonNotHere)
c.requestPeerGoneWriteLimited(dstKey, contents, derp.PeerGoneReasonNotHere)
}
s.recordDrop(contents, c.key, dstKey, reason)
c.debugLogf("SendPacket for %s, dropping with reason=%s", dstKey.ShortString(), reason)
@ -1325,13 +1323,13 @@ func (c *sclient) sendPkt(dst *sclient, p pkt) error {
// notified (in a new goroutine) whenever a peer has disconnected from all DERP
// nodes in the current region.
func (c *sclient) onPeerGoneFromRegion(peer key.NodePublic) {
c.requestPeerGoneWrite(peer, PeerGoneReasonDisconnected)
c.requestPeerGoneWrite(peer, derp.PeerGoneReasonDisconnected)
}
// requestPeerGoneWrite sends a request to write a "peer gone" frame
// with an explanation of why it is gone. It blocks until either the
// write request is scheduled, or the client has closed.
func (c *sclient) requestPeerGoneWrite(peer key.NodePublic, reason PeerGoneReasonType) {
func (c *sclient) requestPeerGoneWrite(peer key.NodePublic, reason derp.PeerGoneReasonType) {
select {
case c.peerGone <- peerGoneMsg{
peer: peer,
@ -1358,7 +1356,7 @@ func (c *sclient) requestMeshUpdate() {
// isMeshPeer reports whether the client is a trusted mesh peer
// node in the DERP region.
func (s *Server) isMeshPeer(info *clientInfo) bool {
func (s *Server) isMeshPeer(info *derp.ClientInfo) bool {
// Compare mesh keys in constant time to prevent timing attacks.
// Since mesh keys are a fixed length, we don't need to be concerned
// about timing attacks on client mesh keys that are the wrong length.
@ -1372,7 +1370,7 @@ func (s *Server) isMeshPeer(info *clientInfo) bool {
// verifyClient checks whether the client is allowed to connect to the derper,
// depending on how & whether the server's been configured to verify.
func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, info *clientInfo, clientIP netip.Addr) error {
func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, info *derp.ClientInfo, clientIP netip.Addr) error {
if s.isMeshPeer(info) {
// Trusted mesh peer. No need to verify further. In fact, verifying
// further wouldn't work: it's not part of the tailnet so tailscaled and
@ -1436,10 +1434,10 @@ func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, inf
}
func (s *Server) sendServerKey(lw *lazyBufioWriter) error {
buf := make([]byte, 0, len(magic)+key.NodePublicRawLen)
buf = append(buf, magic...)
buf := make([]byte, 0, len(derp.Magic)+key.NodePublicRawLen)
buf = append(buf, derp.Magic...)
buf = s.publicKey.AppendTo(buf)
err := writeFrame(lw.bw(), frameServerKey, buf)
err := derp.WriteFrame(lw.bw(), derp.FrameServerKey, buf)
lw.Flush() // redundant (no-op) flush to release bufio.Writer
return err
}
@ -1504,21 +1502,16 @@ func (s *Server) noteClientActivity(c *sclient) {
dup.sendHistory = append(dup.sendHistory, c)
}
type serverInfo struct {
Version int `json:"version,omitempty"`
TokenBucketBytesPerSecond int `json:",omitempty"`
TokenBucketBytesBurst int `json:",omitempty"`
}
type ServerInfo = derp.ServerInfo
func (s *Server) sendServerInfo(bw *lazyBufioWriter, clientKey key.NodePublic) error {
msg, err := json.Marshal(serverInfo{Version: ProtocolVersion})
msg, err := json.Marshal(ServerInfo{Version: derp.ProtocolVersion})
if err != nil {
return err
}
msgbox := s.privateKey.SealTo(clientKey, msg)
if err := writeFrameHeader(bw.bw(), frameServerInfo, uint32(len(msgbox))); err != nil {
if err := derp.WriteFrameHeader(bw.bw(), derp.FrameServerInfo, uint32(len(msgbox))); err != nil {
return err
}
if _, err := bw.Write(msgbox); err != nil {
@ -1530,12 +1523,12 @@ func (s *Server) sendServerInfo(bw *lazyBufioWriter, clientKey key.NodePublic) e
// recvClientKey reads the frameClientInfo frame from the client (its
// proof of identity) upon its initial connection. It should be
// considered especially untrusted at this point.
func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info *clientInfo, err error) {
fl, err := readFrameTypeHeader(br, frameClientInfo)
func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info *derp.ClientInfo, err error) {
fl, err := derp.ReadFrameTypeHeader(br, derp.FrameClientInfo)
if err != nil {
return zpub, nil, err
}
const minLen = keyLen + nonceLen
const minLen = derp.KeyLen + derp.NonceLen
if fl < minLen {
return zpub, nil, errors.New("short client info")
}
@ -1547,7 +1540,7 @@ func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info
if err := clientKey.ReadRawWithoutAllocating(br); err != nil {
return zpub, nil, err
}
msgLen := int(fl - keyLen)
msgLen := int(fl - derp.KeyLen)
msgbox := make([]byte, msgLen)
if _, err := io.ReadFull(br, msgbox); err != nil {
return zpub, nil, fmt.Errorf("msgbox: %v", err)
@ -1556,7 +1549,7 @@ func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info
if !ok {
return zpub, nil, fmt.Errorf("msgbox: cannot open len=%d with client key %s", msgLen, clientKey)
}
info = new(clientInfo)
info = new(derp.ClientInfo)
if err := json.Unmarshal(msg, info); err != nil {
return zpub, nil, fmt.Errorf("msg: %v", err)
}
@ -1564,15 +1557,15 @@ func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info
}
func (s *Server) recvPacket(br *bufio.Reader, frameLen uint32) (dstKey key.NodePublic, contents []byte, err error) {
if frameLen < keyLen {
if frameLen < derp.KeyLen {
return zpub, nil, errors.New("short send packet frame")
}
if err := dstKey.ReadRawWithoutAllocating(br); err != nil {
return zpub, nil, err
}
packetLen := frameLen - keyLen
if packetLen > MaxPacketSize {
return zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, MaxPacketSize)
packetLen := frameLen - derp.KeyLen
if packetLen > derp.MaxPacketSize {
return zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, derp.MaxPacketSize)
}
contents = make([]byte, packetLen)
if _, err := io.ReadFull(br, contents); err != nil {
@ -1592,7 +1585,7 @@ func (s *Server) recvPacket(br *bufio.Reader, frameLen uint32) (dstKey key.NodeP
var zpub key.NodePublic
func (s *Server) recvForwardPacket(br *bufio.Reader, frameLen uint32) (srcKey, dstKey key.NodePublic, contents []byte, err error) {
if frameLen < keyLen*2 {
if frameLen < derp.KeyLen*2 {
return zpub, zpub, nil, errors.New("short send packet frame")
}
if err := srcKey.ReadRawWithoutAllocating(br); err != nil {
@ -1601,9 +1594,9 @@ func (s *Server) recvForwardPacket(br *bufio.Reader, frameLen uint32) (srcKey, d
if err := dstKey.ReadRawWithoutAllocating(br); err != nil {
return zpub, zpub, nil, err
}
packetLen := frameLen - keyLen*2
if packetLen > MaxPacketSize {
return zpub, zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, MaxPacketSize)
packetLen := frameLen - derp.KeyLen*2
if packetLen > derp.MaxPacketSize {
return zpub, zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, derp.MaxPacketSize)
}
contents = make([]byte, packetLen)
if _, err := io.ReadFull(br, contents); err != nil {
@ -1628,7 +1621,7 @@ type sclient struct {
s *Server
nc Conn
key key.NodePublic
info clientInfo
info derp.ClientInfo
logf logger.Logf
done <-chan struct{} // closed when connection closes
remoteIPPort netip.AddrPort // zero if remoteAddr is not ip:port.
@ -1666,19 +1659,19 @@ type sclient struct {
peerGoneLim *rate.Limiter
}
func (c *sclient) presentFlags() PeerPresentFlags {
var f PeerPresentFlags
func (c *sclient) presentFlags() derp.PeerPresentFlags {
var f derp.PeerPresentFlags
if c.info.IsProber {
f |= PeerPresentIsProber
f |= derp.PeerPresentIsProber
}
if c.canMesh {
f |= PeerPresentIsMeshPeer
f |= derp.PeerPresentIsMeshPeer
}
if c.isNotIdealConn {
f |= PeerPresentNotIdeal
f |= derp.PeerPresentNotIdeal
}
if f == 0 {
return PeerPresentIsRegular
return derp.PeerPresentIsRegular
}
return f
}
@ -1688,7 +1681,7 @@ func (c *sclient) presentFlags() PeerPresentFlags {
type peerConnState struct {
ipPort netip.AddrPort // if present, the peer's IP:port
peer key.NodePublic
flags PeerPresentFlags
flags derp.PeerPresentFlags
present bool
}
@ -1709,7 +1702,7 @@ type pkt struct {
// peerGoneMsg is a request to write a peerGone frame to an sclient
type peerGoneMsg struct {
peer key.NodePublic
reason PeerGoneReasonType
reason derp.PeerGoneReasonType
}
func (c *sclient) setPreferred(v bool) {
@ -1788,7 +1781,7 @@ func (c *sclient) sendLoop(ctx context.Context) error {
defer c.onSendLoopDone()
jitter := rand.N(5 * time.Second)
keepAliveTick, keepAliveTickChannel := c.s.clock.NewTicker(KeepAlive + jitter)
keepAliveTick, keepAliveTickChannel := c.s.clock.NewTicker(derp.KeepAlive + jitter)
defer keepAliveTick.Stop()
var werr error // last write error
@ -1887,14 +1880,14 @@ func (c *sclient) setWriteDeadline() {
// sendKeepAlive sends a keep-alive frame, without flushing.
func (c *sclient) sendKeepAlive() error {
c.setWriteDeadline()
return writeFrameHeader(c.bw.bw(), frameKeepAlive, 0)
return derp.WriteFrameHeader(c.bw.bw(), derp.FrameKeepAlive, 0)
}
// sendPong sends a pong reply, without flushing.
func (c *sclient) sendPong(data [8]byte) error {
c.s.sentPong.Add(1)
c.setWriteDeadline()
if err := writeFrameHeader(c.bw.bw(), framePong, uint32(len(data))); err != nil {
if err := derp.WriteFrameHeader(c.bw.bw(), derp.FramePong, uint32(len(data))); err != nil {
return err
}
_, err := c.bw.Write(data[:])
@ -1902,23 +1895,23 @@ func (c *sclient) sendPong(data [8]byte) error {
}
const (
peerGoneFrameLen = keyLen + 1
peerPresentFrameLen = keyLen + 16 + 2 + 1 // 16 byte IP + 2 byte port + 1 byte flags
peerGoneFrameLen = derp.KeyLen + 1
peerPresentFrameLen = derp.KeyLen + 16 + 2 + 1 // 16 byte IP + 2 byte port + 1 byte flags
)
// sendPeerGone sends a peerGone frame, without flushing.
func (c *sclient) sendPeerGone(peer key.NodePublic, reason PeerGoneReasonType) error {
func (c *sclient) sendPeerGone(peer key.NodePublic, reason derp.PeerGoneReasonType) error {
switch reason {
case PeerGoneReasonDisconnected:
case derp.PeerGoneReasonDisconnected:
c.s.peerGoneDisconnectedFrames.Add(1)
case PeerGoneReasonNotHere:
case derp.PeerGoneReasonNotHere:
c.s.peerGoneNotHereFrames.Add(1)
}
c.setWriteDeadline()
data := make([]byte, 0, peerGoneFrameLen)
data = peer.AppendTo(data)
data = append(data, byte(reason))
if err := writeFrameHeader(c.bw.bw(), framePeerGone, uint32(len(data))); err != nil {
if err := derp.WriteFrameHeader(c.bw.bw(), derp.FramePeerGone, uint32(len(data))); err != nil {
return err
}
@ -1927,17 +1920,17 @@ func (c *sclient) sendPeerGone(peer key.NodePublic, reason PeerGoneReasonType) e
}
// sendPeerPresent sends a peerPresent frame, without flushing.
func (c *sclient) sendPeerPresent(peer key.NodePublic, ipPort netip.AddrPort, flags PeerPresentFlags) error {
func (c *sclient) sendPeerPresent(peer key.NodePublic, ipPort netip.AddrPort, flags derp.PeerPresentFlags) error {
c.setWriteDeadline()
if err := writeFrameHeader(c.bw.bw(), framePeerPresent, peerPresentFrameLen); err != nil {
if err := derp.WriteFrameHeader(c.bw.bw(), derp.FramePeerPresent, peerPresentFrameLen); err != nil {
return err
}
payload := make([]byte, peerPresentFrameLen)
_ = peer.AppendTo(payload[:0])
a16 := ipPort.Addr().As16()
copy(payload[keyLen:], a16[:])
binary.BigEndian.PutUint16(payload[keyLen+16:], ipPort.Port())
payload[keyLen+18] = byte(flags)
copy(payload[derp.KeyLen:], a16[:])
binary.BigEndian.PutUint16(payload[derp.KeyLen+16:], ipPort.Port())
payload[derp.KeyLen+18] = byte(flags)
_, err := c.bw.Write(payload)
return err
}
@ -1975,7 +1968,7 @@ func (c *sclient) sendMeshUpdates() error {
if pcs.present {
err = c.sendPeerPresent(pcs.peer, pcs.ipPort, pcs.flags)
} else {
err = c.sendPeerGone(pcs.peer, PeerGoneReasonDisconnected)
err = c.sendPeerGone(pcs.peer, derp.PeerGoneReasonDisconnected)
}
if err != nil {
return err
@ -2010,7 +2003,7 @@ func (c *sclient) sendPacket(srcKey key.NodePublic, contents []byte) (err error)
pktLen += key.NodePublicRawLen
c.noteSendFromSrc(srcKey)
}
if err = writeFrameHeader(c.bw.bw(), frameRecvPacket, uint32(pktLen)); err != nil {
if err = derp.WriteFrameHeader(c.bw.bw(), derp.FrameRecvPacket, uint32(pktLen)); err != nil {
return err
}
if withKey {
@ -2286,7 +2279,7 @@ func (s *Server) checkVerifyClientsLocalTailscaled() error {
if err != nil {
return fmt.Errorf("localClient.Status: %w", err)
}
info := &clientInfo{
info := &derp.ClientInfo{
IsProber: true,
}
clientIP := netip.IPv6Loopback()

View File

@ -3,7 +3,7 @@
//go:build !linux || android
package derp
package derpserver
import "context"

View File

@ -3,7 +3,7 @@
//go:build linux && !android
package derp
package derpserver
import (
"context"

View File

@ -0,0 +1,782 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package derpserver
import (
"bufio"
"cmp"
"context"
"crypto/x509"
"encoding/asn1"
"expvar"
"fmt"
"log"
"net"
"os"
"reflect"
"strconv"
"sync"
"testing"
"time"
qt "github.com/frankban/quicktest"
"go4.org/mem"
"golang.org/x/time/rate"
"tailscale.com/derp"
"tailscale.com/derp/derpconst"
"tailscale.com/types/key"
"tailscale.com/types/logger"
)
const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
func TestSetMeshKey(t *testing.T) {
for name, tt := range map[string]struct {
key string
want key.DERPMesh
wantErr bool
}{
"clobber": {
key: testMeshKey,
wantErr: false,
},
"invalid": {
key: "badf00d",
wantErr: true,
},
} {
t.Run(name, func(t *testing.T) {
s := &Server{}
err := s.SetMeshKey(tt.key)
if tt.wantErr {
if err == nil {
t.Fatalf("expected err")
}
return
}
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
want, err := key.ParseDERPMesh(tt.key)
if err != nil {
t.Fatal(err)
}
if !s.meshKey.Equal(want) {
t.Fatalf("got %v, want %v", s.meshKey, want)
}
})
}
}
func TestIsMeshPeer(t *testing.T) {
s := &Server{}
err := s.SetMeshKey(testMeshKey)
if err != nil {
t.Fatal(err)
}
for name, tt := range map[string]struct {
want bool
meshKey string
wantAllocs float64
}{
"nil": {
want: false,
wantAllocs: 0,
},
"mismatch": {
meshKey: "6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8",
want: false,
wantAllocs: 1,
},
"match": {
meshKey: testMeshKey,
want: true,
wantAllocs: 0,
},
} {
t.Run(name, func(t *testing.T) {
var got bool
var mKey key.DERPMesh
if tt.meshKey != "" {
mKey, err = key.ParseDERPMesh(tt.meshKey)
if err != nil {
t.Fatalf("ParseDERPMesh(%q) failed: %v", tt.meshKey, err)
}
}
info := derp.ClientInfo{
MeshKey: mKey,
}
allocs := testing.AllocsPerRun(1, func() {
got = s.isMeshPeer(&info)
})
if got != tt.want {
t.Fatalf("got %t, want %t: info = %#v", got, tt.want, info)
}
if allocs != tt.wantAllocs && tt.want {
t.Errorf("%f allocations, want %f", allocs, tt.wantAllocs)
}
})
}
}
type testFwd int
func (testFwd) ForwardPacket(key.NodePublic, key.NodePublic, []byte) error {
panic("not called in tests")
}
func (testFwd) String() string {
panic("not called in tests")
}
func pubAll(b byte) (ret key.NodePublic) {
var bs [32]byte
for i := range bs {
bs[i] = b
}
return key.NodePublicFromRaw32(mem.B(bs[:]))
}
func TestForwarderRegistration(t *testing.T) {
s := &Server{
clients: make(map[key.NodePublic]*clientSet),
clientsMesh: map[key.NodePublic]PacketForwarder{},
}
want := func(want map[key.NodePublic]PacketForwarder) {
t.Helper()
if got := s.clientsMesh; !reflect.DeepEqual(got, want) {
t.Fatalf("mismatch\n got: %v\nwant: %v\n", got, want)
}
}
wantCounter := func(c *expvar.Int, want int) {
t.Helper()
if got := c.Value(); got != int64(want) {
t.Errorf("counter = %v; want %v", got, want)
}
}
singleClient := func(c *sclient) *clientSet {
cs := &clientSet{}
cs.activeClient.Store(c)
return cs
}
u1 := pubAll(1)
u2 := pubAll(2)
u3 := pubAll(3)
s.AddPacketForwarder(u1, testFwd(1))
s.AddPacketForwarder(u2, testFwd(2))
want(map[key.NodePublic]PacketForwarder{
u1: testFwd(1),
u2: testFwd(2),
})
// Verify a remove of non-registered forwarder is no-op.
s.RemovePacketForwarder(u2, testFwd(999))
want(map[key.NodePublic]PacketForwarder{
u1: testFwd(1),
u2: testFwd(2),
})
// Verify a remove of non-registered user is no-op.
s.RemovePacketForwarder(u3, testFwd(1))
want(map[key.NodePublic]PacketForwarder{
u1: testFwd(1),
u2: testFwd(2),
})
// Actual removal.
s.RemovePacketForwarder(u2, testFwd(2))
want(map[key.NodePublic]PacketForwarder{
u1: testFwd(1),
})
// Adding a dup for a user.
wantCounter(&s.multiForwarderCreated, 0)
s.AddPacketForwarder(u1, testFwd(100))
s.AddPacketForwarder(u1, testFwd(100)) // dup to trigger dup path
want(map[key.NodePublic]PacketForwarder{
u1: newMultiForwarder(testFwd(1), testFwd(100)),
})
wantCounter(&s.multiForwarderCreated, 1)
// Removing a forwarder in a multi set that doesn't exist; does nothing.
s.RemovePacketForwarder(u1, testFwd(55))
want(map[key.NodePublic]PacketForwarder{
u1: newMultiForwarder(testFwd(1), testFwd(100)),
})
// Removing a forwarder in a multi set that does exist should collapse it away
// from being a multiForwarder.
wantCounter(&s.multiForwarderDeleted, 0)
s.RemovePacketForwarder(u1, testFwd(1))
want(map[key.NodePublic]PacketForwarder{
u1: testFwd(100),
})
wantCounter(&s.multiForwarderDeleted, 1)
// Removing an entry for a client that's still connected locally should result
// in a nil forwarder.
u1c := &sclient{
key: u1,
logf: logger.Discard,
}
s.clients[u1] = singleClient(u1c)
s.RemovePacketForwarder(u1, testFwd(100))
want(map[key.NodePublic]PacketForwarder{
u1: nil,
})
// But once that client disconnects, it should go away.
s.unregisterClient(u1c)
want(map[key.NodePublic]PacketForwarder{})
// But if it already has a forwarder, it's not removed.
s.AddPacketForwarder(u1, testFwd(2))
s.unregisterClient(u1c)
want(map[key.NodePublic]PacketForwarder{
u1: testFwd(2),
})
// Now pretend u1 was already connected locally (so clientsMesh[u1] is nil), and then we heard
// that they're also connected to a peer of ours. That should transition the forwarder
// from nil directly to the new one, not to a multiForwarder.
s.clients[u1] = singleClient(u1c)
s.clientsMesh[u1] = nil
want(map[key.NodePublic]PacketForwarder{
u1: nil,
})
s.AddPacketForwarder(u1, testFwd(3))
want(map[key.NodePublic]PacketForwarder{
u1: testFwd(3),
})
}
type channelFwd struct {
// id is to ensure that different instances that reference the
// same channel are not equal, as they are used as keys in the
// multiForwarder map.
id int
c chan []byte
}
func (f channelFwd) String() string { return "" }
func (f channelFwd) ForwardPacket(_ key.NodePublic, _ key.NodePublic, packet []byte) error {
f.c <- packet
return nil
}
func TestMultiForwarder(t *testing.T) {
received := 0
var wg sync.WaitGroup
ch := make(chan []byte)
ctx, cancel := context.WithCancel(context.Background())
s := &Server{
clients: make(map[key.NodePublic]*clientSet),
clientsMesh: map[key.NodePublic]PacketForwarder{},
}
u := pubAll(1)
s.AddPacketForwarder(u, channelFwd{1, ch})
wg.Add(2)
go func() {
defer wg.Done()
for {
select {
case <-ch:
received += 1
case <-ctx.Done():
return
}
}
}()
go func() {
defer wg.Done()
for {
s.AddPacketForwarder(u, channelFwd{2, ch})
s.AddPacketForwarder(u, channelFwd{3, ch})
s.RemovePacketForwarder(u, channelFwd{2, ch})
s.RemovePacketForwarder(u, channelFwd{1, ch})
s.AddPacketForwarder(u, channelFwd{1, ch})
s.RemovePacketForwarder(u, channelFwd{3, ch})
if ctx.Err() != nil {
return
}
}
}()
// Number of messages is chosen arbitrarily, just for this loop to
// run long enough concurrently with {Add,Remove}PacketForwarder loop above.
numMsgs := 5000
var fwd PacketForwarder
for i := range numMsgs {
s.mu.Lock()
fwd = s.clientsMesh[u]
s.mu.Unlock()
fwd.ForwardPacket(u, u, []byte(strconv.Itoa(i)))
}
cancel()
wg.Wait()
if received != numMsgs {
t.Errorf("expected %d messages to be forwarded; got %d", numMsgs, received)
}
}
func TestMetaCert(t *testing.T) {
priv := key.NewNode()
pub := priv.Public()
s := NewServer(priv, t.Logf)
certBytes := s.MetaCert()
cert, err := x509.ParseCertificate(certBytes)
if err != nil {
log.Fatal(err)
}
if fmt.Sprint(cert.SerialNumber) != fmt.Sprint(derp.ProtocolVersion) {
t.Errorf("serial = %v; want %v", cert.SerialNumber, derp.ProtocolVersion)
}
if g, w := cert.Subject.CommonName, derpconst.MetaCertCommonNamePrefix+pub.UntypedHexString(); g != w {
t.Errorf("CommonName = %q; want %q", g, w)
}
if n := len(cert.Extensions); n != 1 {
t.Fatalf("got %d extensions; want 1", n)
}
// oidExtensionBasicConstraints is the Basic Constraints ID copied
// from the x509 package.
oidExtensionBasicConstraints := asn1.ObjectIdentifier{2, 5, 29, 19}
if id := cert.Extensions[0].Id; !id.Equal(oidExtensionBasicConstraints) {
t.Errorf("extension ID = %v; want %v", id, oidExtensionBasicConstraints)
}
}
func TestServerDupClients(t *testing.T) {
serverPriv := key.NewNode()
var s *Server
clientPriv := key.NewNode()
clientPub := clientPriv.Public()
var c1, c2, c3 *sclient
var clientName map[*sclient]string
// run starts a new test case and resets clients back to their zero values.
run := func(name string, dupPolicy dupPolicy, f func(t *testing.T)) {
s = NewServer(serverPriv, t.Logf)
s.dupPolicy = dupPolicy
c1 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c1: ")}
c2 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c2: ")}
c3 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c3: ")}
clientName = map[*sclient]string{
c1: "c1",
c2: "c2",
c3: "c3",
}
t.Run(name, f)
}
runBothWays := func(name string, f func(t *testing.T)) {
run(name+"_disablefighters", disableFighters, f)
run(name+"_lastwriteractive", lastWriterIsActive, f)
}
wantSingleClient := func(t *testing.T, want *sclient) {
t.Helper()
got, ok := s.clients[want.key]
if !ok {
t.Error("no clients for key")
return
}
if got.dup != nil {
t.Errorf("unexpected dup set for single client")
}
cur := got.activeClient.Load()
if cur != want {
t.Errorf("active client = %q; want %q", clientName[cur], clientName[want])
}
if cur != nil {
if cur.isDup.Load() {
t.Errorf("unexpected isDup on singleClient")
}
if cur.isDisabled.Load() {
t.Errorf("unexpected isDisabled on singleClient")
}
}
}
wantNoClient := func(t *testing.T) {
t.Helper()
_, ok := s.clients[clientPub]
if !ok {
// Good
return
}
t.Errorf("got client; want empty")
}
wantDupSet := func(t *testing.T) *dupClientSet {
t.Helper()
cs, ok := s.clients[clientPub]
if !ok {
t.Fatal("no set for key; want dup set")
return nil
}
if cs.dup != nil {
return cs.dup
}
t.Fatalf("no dup set for key; want dup set")
return nil
}
wantActive := func(t *testing.T, want *sclient) {
t.Helper()
set, ok := s.clients[clientPub]
if !ok {
t.Error("no set for key")
return
}
got := set.activeClient.Load()
if got != want {
t.Errorf("active client = %q; want %q", clientName[got], clientName[want])
}
}
checkDup := func(t *testing.T, c *sclient, want bool) {
t.Helper()
if got := c.isDup.Load(); got != want {
t.Errorf("client %q isDup = %v; want %v", clientName[c], got, want)
}
}
checkDisabled := func(t *testing.T, c *sclient, want bool) {
t.Helper()
if got := c.isDisabled.Load(); got != want {
t.Errorf("client %q isDisabled = %v; want %v", clientName[c], got, want)
}
}
wantDupConns := func(t *testing.T, want int) {
t.Helper()
if got := s.dupClientConns.Value(); got != int64(want) {
t.Errorf("dupClientConns = %v; want %v", got, want)
}
}
wantDupKeys := func(t *testing.T, want int) {
t.Helper()
if got := s.dupClientKeys.Value(); got != int64(want) {
t.Errorf("dupClientKeys = %v; want %v", got, want)
}
}
// Common case: a single client comes and goes, with no dups.
runBothWays("one_comes_and_goes", func(t *testing.T) {
wantNoClient(t)
s.registerClient(c1)
wantSingleClient(t, c1)
s.unregisterClient(c1)
wantNoClient(t)
})
// A still somewhat common case: a single client was
// connected and then their wifi dies or laptop closes
// or they switch networks and connect from a
// different network. They have two connections but
// it's not very bad. Only their new one is
// active. The last one, being dead, doesn't send and
// thus the new one doesn't get disabled.
runBothWays("small_overlap_replacement", func(t *testing.T) {
wantNoClient(t)
s.registerClient(c1)
wantSingleClient(t, c1)
wantActive(t, c1)
wantDupKeys(t, 0)
wantDupKeys(t, 0)
s.registerClient(c2) // wifi dies; c2 replacement connects
wantDupSet(t)
wantDupConns(t, 2)
wantDupKeys(t, 1)
checkDup(t, c1, true)
checkDup(t, c2, true)
checkDisabled(t, c1, false)
checkDisabled(t, c2, false)
wantActive(t, c2) // sends go to the replacement
s.unregisterClient(c1) // c1 finally times out
wantSingleClient(t, c2)
checkDup(t, c2, false) // c2 is no longer a dup
wantActive(t, c2)
wantDupConns(t, 0)
wantDupKeys(t, 0)
})
// Key cloning situation with concurrent clients, both trying
// to write.
run("concurrent_dups_get_disabled", disableFighters, func(t *testing.T) {
wantNoClient(t)
s.registerClient(c1)
wantSingleClient(t, c1)
wantActive(t, c1)
s.registerClient(c2)
wantDupSet(t)
wantDupKeys(t, 1)
wantDupConns(t, 2)
wantActive(t, c2)
checkDup(t, c1, true)
checkDup(t, c2, true)
checkDisabled(t, c1, false)
checkDisabled(t, c2, false)
s.noteClientActivity(c2)
checkDisabled(t, c1, false)
checkDisabled(t, c2, false)
s.noteClientActivity(c1)
checkDisabled(t, c1, true)
checkDisabled(t, c2, true)
wantActive(t, nil)
s.registerClient(c3)
wantActive(t, c3)
checkDisabled(t, c3, false)
wantDupKeys(t, 1)
wantDupConns(t, 3)
s.unregisterClient(c3)
wantActive(t, nil)
wantDupKeys(t, 1)
wantDupConns(t, 2)
s.unregisterClient(c2)
wantSingleClient(t, c1)
wantDupKeys(t, 0)
wantDupConns(t, 0)
})
// Key cloning with an A->B->C->A series instead.
run("concurrent_dups_three_parties", disableFighters, func(t *testing.T) {
wantNoClient(t)
s.registerClient(c1)
s.registerClient(c2)
s.registerClient(c3)
s.noteClientActivity(c1)
checkDisabled(t, c1, true)
checkDisabled(t, c2, true)
checkDisabled(t, c3, true)
wantActive(t, nil)
})
run("activity_promotes_primary_when_nil", disableFighters, func(t *testing.T) {
wantNoClient(t)
// Last registered client is the active one...
s.registerClient(c1)
wantActive(t, c1)
s.registerClient(c2)
wantActive(t, c2)
s.registerClient(c3)
s.noteClientActivity(c2)
wantActive(t, c3)
// But if the last one goes away, the one with the
// most recent activity wins.
s.unregisterClient(c3)
wantActive(t, c2)
})
run("concurrent_dups_three_parties_last_writer", lastWriterIsActive, func(t *testing.T) {
wantNoClient(t)
s.registerClient(c1)
wantActive(t, c1)
s.registerClient(c2)
wantActive(t, c2)
s.noteClientActivity(c1)
checkDisabled(t, c1, false)
checkDisabled(t, c2, false)
wantActive(t, c1)
s.noteClientActivity(c2)
checkDisabled(t, c1, false)
checkDisabled(t, c2, false)
wantActive(t, c2)
s.unregisterClient(c2)
checkDisabled(t, c1, false)
wantActive(t, c1)
})
}
func TestLimiter(t *testing.T) {
rl := rate.NewLimiter(rate.Every(time.Minute), 100)
for i := range 200 {
r := rl.Reserve()
d := r.Delay()
t.Logf("i=%d, allow=%v, d=%v", i, r.OK(), d)
}
}
// BenchmarkConcurrentStreams exercises mutex contention on a
// single Server instance with multiple concurrent client flows.
func BenchmarkConcurrentStreams(b *testing.B) {
serverPrivateKey := key.NewNode()
s := NewServer(serverPrivateKey, logger.Discard)
defer s.Close()
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
b.Fatal(err)
}
defer ln.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
for ctx.Err() == nil {
connIn, err := ln.Accept()
if err != nil {
if ctx.Err() != nil {
return
}
b.Error(err)
return
}
brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn))
go s.Accept(ctx, connIn, brwServer, "test-client")
}
}()
newClient := func(t testing.TB) *derp.Client {
t.Helper()
connOut, err := net.Dial("tcp", ln.Addr().String())
if err != nil {
b.Fatal(err)
}
t.Cleanup(func() { connOut.Close() })
k := key.NewNode()
brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut))
client, err := derp.NewClient(k, connOut, brw, logger.Discard)
if err != nil {
b.Fatalf("client: %v", err)
}
return client
}
b.RunParallel(func(pb *testing.PB) {
c1, c2 := newClient(b), newClient(b)
const packetSize = 100
msg := make([]byte, packetSize)
for pb.Next() {
if err := c1.Send(c2.PublicKey(), msg); err != nil {
b.Fatal(err)
}
_, err := c2.Recv()
if err != nil {
return
}
}
})
}
func BenchmarkSendRecv(b *testing.B) {
for _, size := range []int{10, 100, 1000, 10000} {
b.Run(fmt.Sprintf("msgsize=%d", size), func(b *testing.B) { benchmarkSendRecvSize(b, size) })
}
}
func benchmarkSendRecvSize(b *testing.B, packetSize int) {
serverPrivateKey := key.NewNode()
s := NewServer(serverPrivateKey, logger.Discard)
defer s.Close()
k := key.NewNode()
clientKey := k.Public()
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
b.Fatal(err)
}
defer ln.Close()
connOut, err := net.Dial("tcp", ln.Addr().String())
if err != nil {
b.Fatal(err)
}
defer connOut.Close()
connIn, err := ln.Accept()
if err != nil {
b.Fatal(err)
}
defer connIn.Close()
brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn))
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go s.Accept(ctx, connIn, brwServer, "test-client")
brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut))
client, err := derp.NewClient(k, connOut, brw, logger.Discard)
if err != nil {
b.Fatalf("client: %v", err)
}
go func() {
for {
_, err := client.Recv()
if err != nil {
return
}
}
}()
msg := make([]byte, packetSize)
b.SetBytes(int64(len(msg)))
b.ReportAllocs()
b.ResetTimer()
for range b.N {
if err := client.Send(clientKey, msg); err != nil {
b.Fatal(err)
}
}
}
func TestParseSSOutput(t *testing.T) {
contents, err := os.ReadFile("testdata/example_ss.txt")
if err != nil {
t.Errorf("os.ReadFile(example_ss.txt) failed: %v", err)
}
seen := parseSSOutput(string(contents))
if len(seen) == 0 {
t.Errorf("parseSSOutput expected non-empty map")
}
}
func TestGetPerClientSendQueueDepth(t *testing.T) {
c := qt.New(t)
envKey := "TS_DEBUG_DERP_PER_CLIENT_SEND_QUEUE_DEPTH"
testCases := []struct {
envVal string
want int
}{
// Empty case, envknob treats empty as missing also.
{
"", defaultPerClientSendQueueDepth,
},
{
"64", 64,
},
}
for _, tc := range testCases {
t.Run(cmp.Or(tc.envVal, "empty"), func(t *testing.T) {
t.Setenv(envKey, tc.envVal)
val := getPerClientSendQueueDepth()
c.Assert(val, qt.Equals, tc.want)
})
}
}

View File

@ -1,7 +1,7 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package derphttp
package derpserver
import (
"fmt"
@ -12,14 +12,8 @@ import (
"tailscale.com/derp"
)
// fastStartHeader is the header (with value "1") that signals to the HTTP
// server that the DERP HTTP client does not want the HTTP 101 response
// headers and it will begin writing & reading the DERP protocol immediately
// following its HTTP request.
const fastStartHeader = "Derp-Fast-Start"
// Handler returns an http.Handler to be mounted at /derp, serving s.
func Handler(s *derp.Server) http.Handler {
func Handler(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@ -42,7 +36,7 @@ func Handler(s *derp.Server) http.Handler {
return
}
fastStart := r.Header.Get(fastStartHeader) == "1"
fastStart := r.Header.Get(derp.FastStartHeader) == "1"
h, ok := w.(http.Hijacker)
if !ok {
@ -69,7 +63,7 @@ func Handler(s *derp.Server) http.Handler {
}
if v := r.Header.Get(derp.IdealNodeHeader); v != "" {
ctx = derp.IdealNodeContextKey.WithValue(ctx, v)
ctx = IdealNodeContextKey.WithValue(ctx, v)
}
s.Accept(ctx, netConn, conn, netConn.RemoteAddr().String())
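Pulling the pieces of this change together, a minimal standalone sketch (the listen address and log.Printf as the logger are assumptions for illustration; the real derper terminates TLS and registers more routes) of mounting the new derpserver package on a mux the way cmd/derper and the tests below do:

package main

import (
	"log"
	"net/http"

	"tailscale.com/derp/derpserver"
	"tailscale.com/types/key"
)

func main() {
	// NewServer takes the DERP server's node private key and a log function.
	s := derpserver.NewServer(key.NewNode(), log.Printf)
	defer s.Close()

	mux := http.NewServeMux()
	mux.Handle("/derp", derpserver.Handler(s))                 // DERP-over-HTTP upgrade endpoint
	mux.HandleFunc("/generate_204", derpserver.ServeNoContent) // probe/no-content endpoint

	// Plain HTTP for brevity only.
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", mux))
}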

derp/export_test.go Normal file
View File

@ -0,0 +1,10 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package derp
import "time"
func (c *Client) RecvTimeoutForTest(timeout time.Duration) (m ReceivedMessage, err error) {
return c.recvTimeout(timeout)
}
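This shim exposes the unexported recvTimeout to black-box tests in package derp_test; a brief sketch (the helper name is hypothetical and client construction is elided) of how such a test might use it:

package derp_test

import (
	"testing"
	"time"

	"tailscale.com/derp"
)

// recvOne is a hypothetical test helper: it waits up to timeout for one
// frame from an already connected client (client setup elided here).
func recvOne(t *testing.T, c *derp.Client, timeout time.Duration) derp.ReceivedMessage {
	t.Helper()
	m, err := c.RecvTimeoutForTest(timeout)
	if err != nil {
		t.Fatalf("RecvTimeoutForTest: %v", err)
	}
	return m
}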

View File

@ -8,15 +8,13 @@ package ipnlocal
import (
"errors"
"net"
"tailscale.com/client/local"
)
const webClientPort = 5252
type webClient struct{}
func (b *LocalBackend) ConfigureWebClient(lc *local.Client) {}
func (b *LocalBackend) ConfigureWebClient(any) {}
func (b *LocalBackend) webClientGetOrInit() error {
return errors.New("not implemented")

View File

@ -15,7 +15,7 @@ import (
"testing"
"time"
"tailscale.com/derp/derphttp"
"tailscale.com/derp/derpserver"
"tailscale.com/net/netmon"
"tailscale.com/syncs"
"tailscale.com/tstest/nettest"
@ -136,7 +136,7 @@ func TestAgainstDERPHandler(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
s := httptest.NewServer(http.HandlerFunc(derphttp.ServeNoContent))
s := httptest.NewServer(http.HandlerFunc(derpserver.ServeNoContent))
defer s.Close()
e := Endpoint{
URL: must.Get(url.Parse(s.URL + "/generate_204")),

View File

@ -16,6 +16,7 @@ import (
"tailscale.com/derp"
"tailscale.com/derp/derphttp"
"tailscale.com/derp/derpserver"
"tailscale.com/net/netmon"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
@ -145,12 +146,12 @@ func TestDerpProber(t *testing.T) {
func TestRunDerpProbeNodePair(t *testing.T) {
// os.Setenv("DERP_DEBUG_LOGS", "true")
serverPrivateKey := key.NewNode()
s := derp.NewServer(serverPrivateKey, t.Logf)
s := derpserver.NewServer(serverPrivateKey, t.Logf)
defer s.Close()
httpsrv := &http.Server{
TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)),
Handler: derphttp.Handler(s),
Handler: derpserver.Handler(s),
}
ln, err := net.Listen("tcp4", "localhost:0")
if err != nil {

View File

@ -222,9 +222,9 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware)
tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp
tailscale.com/control/controlknobs from tailscale.com/control/controlclient+
tailscale.com/derp from tailscale.com/derp/derphttp+
tailscale.com/derp/derpconst from tailscale.com/derp+
tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+
tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+
tailscale.com/disco from tailscale.com/derp+
tailscale.com/disco from tailscale.com/net/tstun+
tailscale.com/doctor from tailscale.com/ipn/ipnlocal
tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal
💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal
@ -266,7 +266,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware)
tailscale.com/logtail from tailscale.com/control/controlclient+
tailscale.com/logtail/backoff from tailscale.com/control/controlclient+
tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+
tailscale.com/metrics from tailscale.com/derp+
tailscale.com/metrics from tailscale.com/health+
tailscale.com/net/ace from tailscale.com/control/controlhttp
tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+
💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock
@ -302,7 +302,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware)
tailscale.com/net/socks5 from tailscale.com/tsnet
tailscale.com/net/sockstats from tailscale.com/control/controlclient+
tailscale.com/net/stun from tailscale.com/ipn/localapi+
L tailscale.com/net/tcpinfo from tailscale.com/derp
tailscale.com/net/tlsdial from tailscale.com/control/controlclient+
tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial
tailscale.com/net/tsaddr from tailscale.com/client/web+
@ -327,7 +326,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware)
tailscale.com/tsd from tailscale.com/ipn/ipnext+
tailscale.com/tstime from tailscale.com/control/controlclient+
tailscale.com/tstime/mono from tailscale.com/net/tstun+
tailscale.com/tstime/rate from tailscale.com/derp+
tailscale.com/tstime/rate from tailscale.com/wgengine/filter
tailscale.com/tsweb from tailscale.com/util/eventbus
tailscale.com/tsweb/varz from tailscale.com/tsweb+
tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal
@ -559,7 +558,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware)
encoding/pem from crypto/tls+
encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+
errors from archive/tar+
expvar from tailscale.com/derp+
expvar from tailscale.com/health+
flag from tailscale.com/util/testenv
fmt from archive/tar+
hash from compress/zlib+

View File

@ -34,8 +34,7 @@ import (
"go4.org/mem"
"tailscale.com/client/local"
"tailscale.com/derp"
"tailscale.com/derp/derphttp"
"tailscale.com/derp/derpserver"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnlocal"
"tailscale.com/ipn/ipnstate"
@ -297,14 +296,14 @@ func exe() string {
func RunDERPAndSTUN(t testing.TB, logf logger.Logf, ipAddress string) (derpMap *tailcfg.DERPMap) {
t.Helper()
d := derp.NewServer(key.NewNode(), logf)
d := derpserver.NewServer(key.NewNode(), logf)
ln, err := net.Listen("tcp", net.JoinHostPort(ipAddress, "0"))
if err != nil {
t.Fatal(err)
}
httpsrv := httptest.NewUnstartedServer(derphttp.Handler(d))
httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d))
httpsrv.Listener.Close()
httpsrv.Listener = ln
httpsrv.Config.ErrorLog = logger.StdLogger(logf)

View File

@ -51,8 +51,7 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"gvisor.dev/gvisor/pkg/waiter"
"tailscale.com/client/local"
"tailscale.com/derp"
"tailscale.com/derp/derphttp"
"tailscale.com/derp/derpserver"
"tailscale.com/net/netutil"
"tailscale.com/net/netx"
"tailscale.com/net/stun"
@ -601,7 +600,7 @@ func (n *node) String() string {
}
type derpServer struct {
srv *derp.Server
srv *derpserver.Server
handler http.Handler
tlsConfig *tls.Config
}
@ -612,12 +611,12 @@ func newDERPServer() *derpServer {
ts.Close()
ds := &derpServer{
srv: derp.NewServer(key.NewNode(), logger.Discard),
srv: derpserver.NewServer(key.NewNode(), logger.Discard),
tlsConfig: ts.TLS, // self-signed; test client configure to not check
}
var mux http.ServeMux
mux.Handle("/derp", derphttp.Handler(ds.srv))
mux.HandleFunc("/generate_204", derphttp.ServeNoContent)
mux.Handle("/derp", derpserver.Handler(ds.srv))
mux.HandleFunc("/generate_204", derpserver.ServeNoContent)
ds.handler = &mux
return ds

View File

@ -39,8 +39,7 @@ import (
"golang.org/x/net/ipv4"
"tailscale.com/cmd/testwrapper/flakytest"
"tailscale.com/control/controlknobs"
"tailscale.com/derp"
"tailscale.com/derp/derphttp"
"tailscale.com/derp/derpserver"
"tailscale.com/disco"
"tailscale.com/envknob"
"tailscale.com/health"
@ -112,9 +111,9 @@ func (c *Conn) WaitReady(t testing.TB) {
}
func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) {
d := derp.NewServer(key.NewNode(), logf)
d := derpserver.NewServer(key.NewNode(), logf)
httpsrv := httptest.NewUnstartedServer(derphttp.Handler(d))
httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d))
httpsrv.Config.ErrorLog = logger.StdLogger(logf)
httpsrv.Config.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler))
httpsrv.StartTLS()