derp/derpserver: clean up extraction of derp.Server (#17264)
PR #17258 extracted `derp.Server` into `derp/derpserver.Server`. This followup patch adds the following cleanups:

1. Rename `derp_server*.go` files to `derpserver*.go` to match the package name.
2. Rename the `derpserver.NewServer` constructor to `derpserver.New` to reduce stuttering.
3. Remove the unnecessary `derpserver.Conn` type alias.

Updates #17257
Updates #cleanup

Signed-off-by: Simon Law <sfllaw@tailscale.com>
This commit is contained in:
parent: db02a46645
commit: 34242df51b
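For context before the diff, a minimal sketch of what a call site looks like after the rename; it is illustrative only and not part of this commit. The import paths (tailscale.com/derp/derpserver, tailscale.com/types/key), the /derp mount path, and the listen address are assumptions inferred from the diff below.

package main

import (
	"log"
	"net/http"

	"tailscale.com/derp/derpserver"
	"tailscale.com/types/key"
)

func main() {
	// Before this patch: s := derpserver.NewServer(key.NewNode(), log.Printf)
	s := derpserver.New(key.NewNode(), log.Printf)
	defer s.Close()

	// The server does not listen on its own; derpserver.Handler returns an
	// http.Handler that hands accepted connections to it. The mount path and
	// listen address here are illustrative, not taken from this commit.
	mux := http.NewServeMux()
	mux.Handle("/derp", derpserver.Handler(s))
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", mux))
}

Dropping the Server suffix means call sites read derpserver.New rather than the stuttering derpserver.NewServer, in line with Go package-naming convention.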
@@ -131,7 +131,7 @@ func TestPinnedCertRawIP(t *testing.T) {
 	}
 	defer ln.Close()
 
-	ds := derpserver.NewServer(key.NewNode(), t.Logf)
+	ds := derpserver.New(key.NewNode(), t.Logf)
 
 	derpHandler := derpserver.Handler(ds)
 	mux := http.NewServeMux()
@@ -188,7 +188,7 @@ func main() {
 
 	serveTLS := tsweb.IsProd443(*addr) || *certMode == "manual"
 
-	s := derpserver.NewServer(cfg.PrivateKey, log.Printf)
+	s := derpserver.New(cfg.PrivateKey, log.Printf)
 	s.SetVerifyClient(*verifyClients)
 	s.SetTailscaledSocketPath(*socket)
 	s.SetVerifyClientURL(*verifyClientURL)
@@ -83,7 +83,7 @@ func TestClientInfoUnmarshal(t *testing.T) {
 
 func TestSendRecv(t *testing.T) {
 	serverPrivateKey := key.NewNode()
-	s := derpserver.NewServer(serverPrivateKey, t.Logf)
+	s := derpserver.New(serverPrivateKey, t.Logf)
 	defer s.Close()
 
 	const numClients = 3
@@ -305,7 +305,7 @@ func TestSendRecv(t *testing.T) {
 
 func TestSendFreeze(t *testing.T) {
 	serverPrivateKey := key.NewNode()
-	s := derpserver.NewServer(serverPrivateKey, t.Logf)
+	s := derpserver.New(serverPrivateKey, t.Logf)
 	defer s.Close()
 	s.WriteTimeout = 100 * time.Millisecond
 
@@ -549,7 +549,7 @@ const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789a
 func newTestServer(t *testing.T, ctx context.Context) *testServer {
 	t.Helper()
 	logf := logger.WithPrefix(t.Logf, "derp-server: ")
-	s := derpserver.NewServer(key.NewNode(), logf)
+	s := derpserver.New(key.NewNode(), logf)
 	s.SetMeshKey(testMeshKey)
 	ln, err := net.Listen("tcp", "127.0.0.1:0")
 	if err != nil {
@@ -44,7 +44,7 @@ func TestSendRecv(t *testing.T) {
 		clientKeys = append(clientKeys, priv.Public())
 	}
 
-	s := derpserver.NewServer(serverPrivateKey, t.Logf)
+	s := derpserver.New(serverPrivateKey, t.Logf)
 	defer s.Close()
 
 	httpsrv := &http.Server{
@@ -172,7 +172,7 @@ func waitConnect(t testing.TB, c *derphttp.Client) {
 
 func TestPing(t *testing.T) {
 	serverPrivateKey := key.NewNode()
-	s := derpserver.NewServer(serverPrivateKey, t.Logf)
+	s := derpserver.New(serverPrivateKey, t.Logf)
 	defer s.Close()
 
 	httpsrv := &http.Server{
@@ -225,7 +225,7 @@ func TestPing(t *testing.T) {
 const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
 
 func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derpserver.Server) {
-	s = derpserver.NewServer(k, t.Logf)
+	s = derpserver.New(k, t.Logf)
 	httpsrv := &http.Server{
 		TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)),
 		Handler: derpserver.Handler(s),
@@ -57,8 +57,6 @@ import (
 	"tailscale.com/version"
 )
 
-type Conn = derp.Conn
-
 // verboseDropKeys is the set of destination public keys that should
 // verbosely log whenever DERP drops a packet.
 var verboseDropKeys = map[key.NodePublic]bool{}
@@ -181,7 +179,7 @@ type Server struct {
 
 	mu sync.Mutex
 	closed bool
-	netConns map[Conn]chan struct{} // chan is closed when conn closes
+	netConns map[derp.Conn]chan struct{} // chan is closed when conn closes
 	clients map[key.NodePublic]*clientSet
 	watchers set.Set[*sclient] // mesh peers
 	// clientsMesh tracks all clients in the cluster, both locally
@@ -354,9 +352,9 @@ var bytesDropped = metrics.NewMultiLabelMap[dropReasonKindLabels](
 	"DERP bytes dropped by reason and by kind",
 )
 
-// NewServer returns a new DERP server. It doesn't listen on its own.
+// New returns a new DERP server. It doesn't listen on its own.
 // Connections are given to it via Server.Accept.
-func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server {
+func New(privateKey key.NodePrivate, logf logger.Logf) *Server {
 	var ms runtime.MemStats
 	runtime.ReadMemStats(&ms)
 
@@ -369,7 +367,7 @@ func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server {
 		packetsRecvByKind: metrics.LabelMap{Label: "kind"},
 		clients: map[key.NodePublic]*clientSet{},
 		clientsMesh: map[key.NodePublic]PacketForwarder{},
-		netConns: map[Conn]chan struct{}{},
+		netConns: map[derp.Conn]chan struct{}{},
 		memSys0: ms.Sys,
 		watchers: set.Set[*sclient]{},
 		peerGoneWatchers: map[key.NodePublic]set.HandleSet[func(key.NodePublic)]{},
@@ -570,7 +568,7 @@ func (s *Server) IsClientConnectedForTest(k key.NodePublic) bool {
 // on its own.
 //
 // Accept closes nc.
-func (s *Server) Accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, remoteAddr string) {
+func (s *Server) Accept(ctx context.Context, nc derp.Conn, brw *bufio.ReadWriter, remoteAddr string) {
 	closed := make(chan struct{})
 
 	s.mu.Lock()
@@ -910,7 +908,7 @@ func (s *Server) addWatcher(c *sclient) {
 	go c.requestMeshUpdate()
 }
 
-func (s *Server) accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, remoteAddr string, connNum int64) error {
+func (s *Server) accept(ctx context.Context, nc derp.Conn, brw *bufio.ReadWriter, remoteAddr string, connNum int64) error {
 	br := brw.Reader
 	nc.SetDeadline(time.Now().Add(10 * time.Second))
 	bw := &lazyBufioWriter{w: nc, lbw: brw.Writer}
@@ -1619,7 +1617,7 @@ type sclient struct {
 	// Static after construction.
 	connNum int64 // process-wide unique counter, incremented each Accept
 	s *Server
-	nc Conn
+	nc derp.Conn
 	key key.NodePublic
 	info derp.ClientInfo
 	logf logger.Logf
@@ -330,7 +330,7 @@ func TestMultiForwarder(t *testing.T) {
 func TestMetaCert(t *testing.T) {
 	priv := key.NewNode()
 	pub := priv.Public()
-	s := NewServer(priv, t.Logf)
+	s := New(priv, t.Logf)
 
 	certBytes := s.MetaCert()
 	cert, err := x509.ParseCertificate(certBytes)
@@ -368,7 +368,7 @@ func TestServerDupClients(t *testing.T) {
 
 	// run starts a new test case and resets clients back to their zero values.
 	run := func(name string, dupPolicy dupPolicy, f func(t *testing.T)) {
-		s = NewServer(serverPriv, t.Logf)
+		s = New(serverPriv, t.Logf)
 		s.dupPolicy = dupPolicy
 		c1 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c1: ")}
 		c2 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c2: ")}
@@ -618,7 +618,7 @@ func TestLimiter(t *testing.T) {
 // single Server instance with multiple concurrent client flows.
 func BenchmarkConcurrentStreams(b *testing.B) {
 	serverPrivateKey := key.NewNode()
-	s := NewServer(serverPrivateKey, logger.Discard)
+	s := New(serverPrivateKey, logger.Discard)
 	defer s.Close()
 
 	ln, err := net.Listen("tcp", "127.0.0.1:0")
@@ -688,7 +688,7 @@ func BenchmarkSendRecv(b *testing.B) {
 
 func benchmarkSendRecvSize(b *testing.B, packetSize int) {
 	serverPrivateKey := key.NewNode()
-	s := NewServer(serverPrivateKey, logger.Discard)
+	s := New(serverPrivateKey, logger.Discard)
 	defer s.Close()
 
 	k := key.NewNode()
@@ -146,7 +146,7 @@ func TestDerpProber(t *testing.T) {
 func TestRunDerpProbeNodePair(t *testing.T) {
 	// os.Setenv("DERP_DEBUG_LOGS", "true")
 	serverPrivateKey := key.NewNode()
-	s := derpserver.NewServer(serverPrivateKey, t.Logf)
+	s := derpserver.New(serverPrivateKey, t.Logf)
 	defer s.Close()
 
 	httpsrv := &http.Server{
@@ -296,7 +296,7 @@ func exe() string {
 func RunDERPAndSTUN(t testing.TB, logf logger.Logf, ipAddress string) (derpMap *tailcfg.DERPMap) {
 	t.Helper()
 
-	d := derpserver.NewServer(key.NewNode(), logf)
+	d := derpserver.New(key.NewNode(), logf)
 
 	ln, err := net.Listen("tcp", net.JoinHostPort(ipAddress, "0"))
 	if err != nil {
@@ -611,7 +611,7 @@ func newDERPServer() *derpServer {
 	ts.Close()
 
 	ds := &derpServer{
-		srv: derpserver.NewServer(key.NewNode(), logger.Discard),
+		srv: derpserver.New(key.NewNode(), logger.Discard),
 		tlsConfig: ts.TLS, // self-signed; test client configure to not check
 	}
 	var mux http.ServeMux
@@ -111,7 +111,7 @@ func (c *Conn) WaitReady(t testing.TB) {
 }
 
 func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) {
-	d := derpserver.NewServer(key.NewNode(), logf)
+	d := derpserver.New(key.NewNode(), logf)
 
 	httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d))
 	httpsrv.Config.ErrorLog = logger.StdLogger(logf)