Mirror of https://github.com/tailscale/tailscale.git (synced 2025-10-04 03:51:42 +02:00)
derp/derpserver: clean up extraction of derp.Server (#17264)
PR #17258 extracted `derp.Server` into `derp/derpserver.Server`. This followup patch adds the following cleanups:

1. Rename `derp_server*.go` files to `derpserver*.go` to match the package name.
2. Rename the `derpserver.NewServer` constructor to `derpserver.New` to reduce stuttering.
3. Remove the unnecessary `derpserver.Conn` type alias.

Updates #17257
Updates #cleanup

Signed-off-by: Simon Law <sfllaw@tailscale.com>
parent db02a46645
commit 34242df51b
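For code that imports the package, this is a mechanical rename: the constructor keeps its signature, and callers refer to `derp.Conn` directly instead of the removed alias. Below is a minimal, hypothetical caller sketched against the renamed API; the import paths follow the diff, while the listen address and mux wiring are illustrative and not taken from this commit.

```go
package main

import (
	"log"
	"net/http"

	"tailscale.com/derp"
	"tailscale.com/derp/derpserver"
	"tailscale.com/types/key"
)

func main() {
	// Before this commit: s := derpserver.NewServer(key.NewNode(), log.Printf)
	s := derpserver.New(key.NewNode(), log.Printf) // renamed constructor, same signature
	defer s.Close()

	// The derpserver.Conn alias was removed; refer to derp.Conn directly where needed.
	var _ derp.Conn

	// Handler wiring is unchanged; the address below is illustrative only.
	http.Handle("/derp", derpserver.Handler(s))
	log.Fatal(http.ListenAndServe("127.0.0.1:3340", nil))
}
```

Apart from the constructor name, the server's behavior and the `SetVerifyClient`, `SetMeshKey`, and `Handler` calls shown in the diff are untouched.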
@@ -131,7 +131,7 @@ func TestPinnedCertRawIP(t *testing.T) {
 	}
 	defer ln.Close()
 
-	ds := derpserver.NewServer(key.NewNode(), t.Logf)
+	ds := derpserver.New(key.NewNode(), t.Logf)
 
 	derpHandler := derpserver.Handler(ds)
 	mux := http.NewServeMux()
@@ -188,7 +188,7 @@ func main() {
 
 	serveTLS := tsweb.IsProd443(*addr) || *certMode == "manual"
 
-	s := derpserver.NewServer(cfg.PrivateKey, log.Printf)
+	s := derpserver.New(cfg.PrivateKey, log.Printf)
 	s.SetVerifyClient(*verifyClients)
 	s.SetTailscaledSocketPath(*socket)
 	s.SetVerifyClientURL(*verifyClientURL)
@@ -83,7 +83,7 @@ func TestClientInfoUnmarshal(t *testing.T) {
 
 func TestSendRecv(t *testing.T) {
 	serverPrivateKey := key.NewNode()
-	s := derpserver.NewServer(serverPrivateKey, t.Logf)
+	s := derpserver.New(serverPrivateKey, t.Logf)
 	defer s.Close()
 
 	const numClients = 3
@@ -305,7 +305,7 @@ func TestSendRecv(t *testing.T) {
 
 func TestSendFreeze(t *testing.T) {
 	serverPrivateKey := key.NewNode()
-	s := derpserver.NewServer(serverPrivateKey, t.Logf)
+	s := derpserver.New(serverPrivateKey, t.Logf)
 	defer s.Close()
 	s.WriteTimeout = 100 * time.Millisecond
 
@@ -549,7 +549,7 @@ const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789a
 func newTestServer(t *testing.T, ctx context.Context) *testServer {
 	t.Helper()
 	logf := logger.WithPrefix(t.Logf, "derp-server: ")
-	s := derpserver.NewServer(key.NewNode(), logf)
+	s := derpserver.New(key.NewNode(), logf)
 	s.SetMeshKey(testMeshKey)
 	ln, err := net.Listen("tcp", "127.0.0.1:0")
 	if err != nil {
@@ -44,7 +44,7 @@ func TestSendRecv(t *testing.T) {
 		clientKeys = append(clientKeys, priv.Public())
 	}
 
-	s := derpserver.NewServer(serverPrivateKey, t.Logf)
+	s := derpserver.New(serverPrivateKey, t.Logf)
 	defer s.Close()
 
 	httpsrv := &http.Server{
@@ -172,7 +172,7 @@ func waitConnect(t testing.TB, c *derphttp.Client) {
 
 func TestPing(t *testing.T) {
 	serverPrivateKey := key.NewNode()
-	s := derpserver.NewServer(serverPrivateKey, t.Logf)
+	s := derpserver.New(serverPrivateKey, t.Logf)
 	defer s.Close()
 
 	httpsrv := &http.Server{
@@ -225,7 +225,7 @@ func TestPing(t *testing.T) {
 const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
 
 func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derpserver.Server) {
-	s = derpserver.NewServer(k, t.Logf)
+	s = derpserver.New(k, t.Logf)
 	httpsrv := &http.Server{
 		TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)),
 		Handler:      derpserver.Handler(s),
@@ -57,8 +57,6 @@ import (
 	"tailscale.com/version"
 )
 
-type Conn = derp.Conn
-
 // verboseDropKeys is the set of destination public keys that should
 // verbosely log whenever DERP drops a packet.
 var verboseDropKeys = map[key.NodePublic]bool{}
@@ -181,7 +179,7 @@ type Server struct {
 
 	mu sync.Mutex
 	closed bool
-	netConns map[Conn]chan struct{} // chan is closed when conn closes
+	netConns map[derp.Conn]chan struct{} // chan is closed when conn closes
 	clients map[key.NodePublic]*clientSet
 	watchers set.Set[*sclient] // mesh peers
 	// clientsMesh tracks all clients in the cluster, both locally
@@ -354,9 +352,9 @@ var bytesDropped = metrics.NewMultiLabelMap[dropReasonKindLabels](
 	"DERP bytes dropped by reason and by kind",
 )
 
-// NewServer returns a new DERP server. It doesn't listen on its own.
+// New returns a new DERP server. It doesn't listen on its own.
 // Connections are given to it via Server.Accept.
-func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server {
+func New(privateKey key.NodePrivate, logf logger.Logf) *Server {
 	var ms runtime.MemStats
 	runtime.ReadMemStats(&ms)
 
@@ -369,7 +367,7 @@ func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server {
 		packetsRecvByKind: metrics.LabelMap{Label: "kind"},
 		clients: map[key.NodePublic]*clientSet{},
 		clientsMesh: map[key.NodePublic]PacketForwarder{},
-		netConns: map[Conn]chan struct{}{},
+		netConns: map[derp.Conn]chan struct{}{},
 		memSys0: ms.Sys,
 		watchers: set.Set[*sclient]{},
 		peerGoneWatchers: map[key.NodePublic]set.HandleSet[func(key.NodePublic)]{},
@@ -570,7 +568,7 @@ func (s *Server) IsClientConnectedForTest(k key.NodePublic) bool {
 // on its own.
 //
 // Accept closes nc.
-func (s *Server) Accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, remoteAddr string) {
+func (s *Server) Accept(ctx context.Context, nc derp.Conn, brw *bufio.ReadWriter, remoteAddr string) {
 	closed := make(chan struct{})
 
 	s.mu.Lock()
@@ -910,7 +908,7 @@ func (s *Server) addWatcher(c *sclient) {
 	go c.requestMeshUpdate()
 }
 
-func (s *Server) accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, remoteAddr string, connNum int64) error {
+func (s *Server) accept(ctx context.Context, nc derp.Conn, brw *bufio.ReadWriter, remoteAddr string, connNum int64) error {
 	br := brw.Reader
 	nc.SetDeadline(time.Now().Add(10 * time.Second))
 	bw := &lazyBufioWriter{w: nc, lbw: brw.Writer}
@@ -1619,7 +1617,7 @@ type sclient struct {
 	// Static after construction.
 	connNum int64 // process-wide unique counter, incremented each Accept
 	s *Server
-	nc Conn
+	nc derp.Conn
 	key key.NodePublic
 	info derp.ClientInfo
 	logf logger.Logf
@@ -330,7 +330,7 @@ func TestMultiForwarder(t *testing.T) {
 func TestMetaCert(t *testing.T) {
 	priv := key.NewNode()
 	pub := priv.Public()
-	s := NewServer(priv, t.Logf)
+	s := New(priv, t.Logf)
 
 	certBytes := s.MetaCert()
 	cert, err := x509.ParseCertificate(certBytes)
@@ -368,7 +368,7 @@ func TestServerDupClients(t *testing.T) {
 
 	// run starts a new test case and resets clients back to their zero values.
 	run := func(name string, dupPolicy dupPolicy, f func(t *testing.T)) {
-		s = NewServer(serverPriv, t.Logf)
+		s = New(serverPriv, t.Logf)
 		s.dupPolicy = dupPolicy
 		c1 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c1: ")}
 		c2 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c2: ")}
@@ -618,7 +618,7 @@ func TestLimiter(t *testing.T) {
 // single Server instance with multiple concurrent client flows.
 func BenchmarkConcurrentStreams(b *testing.B) {
 	serverPrivateKey := key.NewNode()
-	s := NewServer(serverPrivateKey, logger.Discard)
+	s := New(serverPrivateKey, logger.Discard)
 	defer s.Close()
 
 	ln, err := net.Listen("tcp", "127.0.0.1:0")
@@ -688,7 +688,7 @@ func BenchmarkSendRecv(b *testing.B) {
 
 func benchmarkSendRecvSize(b *testing.B, packetSize int) {
 	serverPrivateKey := key.NewNode()
-	s := NewServer(serverPrivateKey, logger.Discard)
+	s := New(serverPrivateKey, logger.Discard)
 	defer s.Close()
 
 	k := key.NewNode()
@@ -146,7 +146,7 @@ func TestDerpProber(t *testing.T) {
 func TestRunDerpProbeNodePair(t *testing.T) {
 	// os.Setenv("DERP_DEBUG_LOGS", "true")
 	serverPrivateKey := key.NewNode()
-	s := derpserver.NewServer(serverPrivateKey, t.Logf)
+	s := derpserver.New(serverPrivateKey, t.Logf)
 	defer s.Close()
 
 	httpsrv := &http.Server{
@@ -296,7 +296,7 @@ func exe() string {
 func RunDERPAndSTUN(t testing.TB, logf logger.Logf, ipAddress string) (derpMap *tailcfg.DERPMap) {
 	t.Helper()
 
-	d := derpserver.NewServer(key.NewNode(), logf)
+	d := derpserver.New(key.NewNode(), logf)
 
 	ln, err := net.Listen("tcp", net.JoinHostPort(ipAddress, "0"))
 	if err != nil {
@@ -611,7 +611,7 @@ func newDERPServer() *derpServer {
 	ts.Close()
 
 	ds := &derpServer{
-		srv: derpserver.NewServer(key.NewNode(), logger.Discard),
+		srv: derpserver.New(key.NewNode(), logger.Discard),
 		tlsConfig: ts.TLS, // self-signed; test client configure to not check
 	}
 	var mux http.ServeMux
@@ -111,7 +111,7 @@ func (c *Conn) WaitReady(t testing.TB) {
 }
 
 func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) {
-	d := derpserver.NewServer(key.NewNode(), logf)
+	d := derpserver.New(key.NewNode(), logf)
 
 	httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d))
 	httpsrv.Config.ErrorLog = logger.StdLogger(logf)