mirror of
https://github.com/tailscale/tailscale.git
synced 2025-12-03 16:31:54 +01:00
all: rename variables with lowercase-l/uppercase-I
See http://go/no-ell Signed-off-by: Alex Chan <alexc@tailscale.com> Updates #cleanup Change-Id: I8c976b51ce7a60f06315048b1920516129cc1d5d
This commit is contained in:
parent
9048ea25db
commit
c2e474e729
@ -203,12 +203,12 @@ func NewAppConnector(c Config) *AppConnector {
|
||||
ac.wildcards = c.RouteInfo.Wildcards
|
||||
ac.controlRoutes = c.RouteInfo.Control
|
||||
}
|
||||
ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, l int64) {
|
||||
ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, l)
|
||||
metricStoreRoutes(c, l)
|
||||
ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, ln int64) {
|
||||
ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, ln)
|
||||
metricStoreRoutes(c, ln)
|
||||
})
|
||||
ac.writeRateDay = newRateLogger(time.Now, 24*time.Hour, func(c int64, s time.Time, l int64) {
|
||||
ac.logf("routeInfo write rate: %d in 24 hours starting at %v (%d routes)", c, s, l)
|
||||
ac.writeRateDay = newRateLogger(time.Now, 24*time.Hour, func(c int64, s time.Time, ln int64) {
|
||||
ac.logf("routeInfo write rate: %d in 24 hours starting at %v (%d routes)", c, s, ln)
|
||||
})
|
||||
return ac
|
||||
}
|
||||
@ -510,8 +510,8 @@ func (e *AppConnector) addDomainAddrLocked(domain string, addr netip.Addr) {
|
||||
slices.SortFunc(e.domains[domain], compareAddr)
|
||||
}
|
||||
|
||||
func compareAddr(l, r netip.Addr) int {
|
||||
return l.Compare(r)
|
||||
func compareAddr(a, b netip.Addr) int {
|
||||
return a.Compare(b)
|
||||
}
|
||||
|
||||
// routesWithout returns a without b where a and b
|
||||
|
||||
@ -31,11 +31,11 @@ func TestDoesNotOverwriteIrregularFiles(t *testing.T) {
|
||||
|
||||
// The least troublesome thing to make that is not a file is a unix socket.
|
||||
// Making a null device sadly requires root.
|
||||
l, err := net.ListenUnix("unix", &net.UnixAddr{Name: path, Net: "unix"})
|
||||
ln, err := net.ListenUnix("unix", &net.UnixAddr{Name: path, Net: "unix"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer l.Close()
|
||||
defer ln.Close()
|
||||
|
||||
err = WriteFile(path, []byte("hello"), 0644)
|
||||
if err == nil {
|
||||
|
||||
@ -24,7 +24,7 @@ type fakeBIRD struct {
|
||||
|
||||
func newFakeBIRD(t *testing.T, protocols ...string) *fakeBIRD {
|
||||
sock := filepath.Join(t.TempDir(), "sock")
|
||||
l, err := net.Listen("unix", sock)
|
||||
ln, err := net.Listen("unix", sock)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -33,7 +33,7 @@ func newFakeBIRD(t *testing.T, protocols ...string) *fakeBIRD {
|
||||
pe[p] = false
|
||||
}
|
||||
return &fakeBIRD{
|
||||
Listener: l,
|
||||
Listener: ln,
|
||||
protocolsEnabled: pe,
|
||||
sock: sock,
|
||||
}
|
||||
@ -123,12 +123,12 @@ type hangingListener struct {
|
||||
|
||||
func newHangingListener(t *testing.T) *hangingListener {
|
||||
sock := filepath.Join(t.TempDir(), "sock")
|
||||
l, err := net.Listen("unix", sock)
|
||||
ln, err := net.Listen("unix", sock)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return &hangingListener{
|
||||
Listener: l,
|
||||
Listener: ln,
|
||||
t: t,
|
||||
done: make(chan struct{}),
|
||||
sock: sock,
|
||||
|
||||
@ -66,7 +66,7 @@ export default function useExitNodes(node: NodeData, filter?: string) {
|
||||
// match from a list of exit node `options` to `nodes`.
|
||||
const addBestMatchNode = (
|
||||
options: ExitNode[],
|
||||
name: (l: ExitNodeLocation) => string
|
||||
name: (loc: ExitNodeLocation) => string
|
||||
) => {
|
||||
const bestNode = highestPriorityNode(options)
|
||||
if (!bestNode || !bestNode.Location) {
|
||||
@ -86,7 +86,7 @@ export default function useExitNodes(node: NodeData, filter?: string) {
|
||||
locationNodesMap.forEach(
|
||||
// add one node per country
|
||||
(countryNodes) =>
|
||||
addBestMatchNode(flattenMap(countryNodes), (l) => l.Country)
|
||||
addBestMatchNode(flattenMap(countryNodes), (loc) => loc.Country)
|
||||
)
|
||||
} else {
|
||||
// Otherwise, show the best match on a city-level,
|
||||
@ -97,12 +97,12 @@ export default function useExitNodes(node: NodeData, filter?: string) {
|
||||
countryNodes.forEach(
|
||||
// add one node per city
|
||||
(cityNodes) =>
|
||||
addBestMatchNode(cityNodes, (l) => `${l.Country}: ${l.City}`)
|
||||
addBestMatchNode(cityNodes, (loc) => `${loc.Country}: ${loc.City}`)
|
||||
)
|
||||
// add the "Country: Best Match" node
|
||||
addBestMatchNode(
|
||||
flattenMap(countryNodes),
|
||||
(l) => `${l.Country}: Best Match`
|
||||
(loc) => `${loc.Country}: Best Match`
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
@ -418,13 +418,13 @@ func parseSynoinfo(path string) (string, error) {
|
||||
// Extract the CPU in the middle (88f6282 in the above example).
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
l := s.Text()
|
||||
if !strings.HasPrefix(l, "unique=") {
|
||||
line := s.Text()
|
||||
if !strings.HasPrefix(line, "unique=") {
|
||||
continue
|
||||
}
|
||||
parts := strings.SplitN(l, "_", 3)
|
||||
parts := strings.SplitN(line, "_", 3)
|
||||
if len(parts) != 3 {
|
||||
return "", fmt.Errorf(`malformed %q: found %q, expected format like 'unique="synology_$cpu_$model'`, path, l)
|
||||
return "", fmt.Errorf(`malformed %q: found %q, expected format like 'unique="synology_$cpu_$model'`, path, line)
|
||||
}
|
||||
return parts[1], nil
|
||||
}
|
||||
|
||||
@ -1287,8 +1287,8 @@ type localAPI struct {
|
||||
notify *ipn.Notify
|
||||
}
|
||||
|
||||
func (l *localAPI) Start() error {
|
||||
path := filepath.Join(l.FSRoot, "tmp/tailscaled.sock.fake")
|
||||
func (lc *localAPI) Start() error {
|
||||
path := filepath.Join(lc.FSRoot, "tmp/tailscaled.sock.fake")
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1298,30 +1298,30 @@ func (l *localAPI) Start() error {
|
||||
return err
|
||||
}
|
||||
|
||||
l.srv = &http.Server{
|
||||
Handler: l,
|
||||
lc.srv = &http.Server{
|
||||
Handler: lc,
|
||||
}
|
||||
l.Path = path
|
||||
l.cond = sync.NewCond(&l.Mutex)
|
||||
go l.srv.Serve(ln)
|
||||
lc.Path = path
|
||||
lc.cond = sync.NewCond(&lc.Mutex)
|
||||
go lc.srv.Serve(ln)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *localAPI) Close() {
|
||||
l.srv.Close()
|
||||
func (lc *localAPI) Close() {
|
||||
lc.srv.Close()
|
||||
}
|
||||
|
||||
func (l *localAPI) Notify(n *ipn.Notify) {
|
||||
func (lc *localAPI) Notify(n *ipn.Notify) {
|
||||
if n == nil {
|
||||
return
|
||||
}
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
l.notify = n
|
||||
l.cond.Broadcast()
|
||||
lc.Lock()
|
||||
defer lc.Unlock()
|
||||
lc.notify = n
|
||||
lc.cond.Broadcast()
|
||||
}
|
||||
|
||||
func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
func (lc *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.URL.Path {
|
||||
case "/localapi/v0/serve-config":
|
||||
if r.Method != "POST" {
|
||||
@ -1348,11 +1348,11 @@ func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
f.Flush()
|
||||
}
|
||||
enc := json.NewEncoder(w)
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
lc.Lock()
|
||||
defer lc.Unlock()
|
||||
for {
|
||||
if l.notify != nil {
|
||||
if err := enc.Encode(l.notify); err != nil {
|
||||
if lc.notify != nil {
|
||||
if err := enc.Encode(lc.notify); err != nil {
|
||||
// Usually broken pipe as the test client disconnects.
|
||||
return
|
||||
}
|
||||
@ -1360,7 +1360,7 @@ func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
f.Flush()
|
||||
}
|
||||
}
|
||||
l.cond.Wait()
|
||||
lc.cond.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -481,32 +481,32 @@ func newRateLimitedListener(ln net.Listener, limit rate.Limit, burst int) *rateL
|
||||
return &rateLimitedListener{Listener: ln, lim: rate.NewLimiter(limit, burst)}
|
||||
}
|
||||
|
||||
func (l *rateLimitedListener) ExpVar() expvar.Var {
|
||||
func (ln *rateLimitedListener) ExpVar() expvar.Var {
|
||||
m := new(metrics.Set)
|
||||
m.Set("counter_accepted_connections", &l.numAccepts)
|
||||
m.Set("counter_rejected_connections", &l.numRejects)
|
||||
m.Set("counter_accepted_connections", &ln.numAccepts)
|
||||
m.Set("counter_rejected_connections", &ln.numRejects)
|
||||
return m
|
||||
}
|
||||
|
||||
var errLimitedConn = errors.New("cannot accept connection; rate limited")
|
||||
|
||||
func (l *rateLimitedListener) Accept() (net.Conn, error) {
|
||||
func (ln *rateLimitedListener) Accept() (net.Conn, error) {
|
||||
// Even under a rate limited situation, we accept the connection immediately
|
||||
// and close it, rather than being slow at accepting new connections.
|
||||
// This provides two benefits: 1) it signals to the client that something
|
||||
// is going on on the server, and 2) it prevents new connections from
|
||||
// piling up and occupying resources in the OS kernel.
|
||||
// The client will retry as needing (with backoffs in place).
|
||||
cn, err := l.Listener.Accept()
|
||||
cn, err := ln.Listener.Accept()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !l.lim.Allow() {
|
||||
l.numRejects.Add(1)
|
||||
if !ln.lim.Allow() {
|
||||
ln.numRejects.Add(1)
|
||||
cn.Close()
|
||||
return nil, errLimitedConn
|
||||
}
|
||||
l.numAccepts.Add(1)
|
||||
ln.numAccepts.Add(1)
|
||||
return cn, nil
|
||||
}
|
||||
|
||||
|
||||
@ -36,21 +36,21 @@ type egressEpsReconciler struct {
|
||||
// It compares tailnet service state stored in egress proxy state Secrets by containerboot with the desired
|
||||
// configuration stored in proxy-cfg ConfigMap to determine if the endpoint is ready.
|
||||
func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
|
||||
l := er.logger.With("Service", req.NamespacedName)
|
||||
l.Debugf("starting reconcile")
|
||||
defer l.Debugf("reconcile finished")
|
||||
lg := er.logger.With("Service", req.NamespacedName)
|
||||
lg.Debugf("starting reconcile")
|
||||
defer lg.Debugf("reconcile finished")
|
||||
|
||||
eps := new(discoveryv1.EndpointSlice)
|
||||
err = er.Get(ctx, req.NamespacedName, eps)
|
||||
if apierrors.IsNotFound(err) {
|
||||
l.Debugf("EndpointSlice not found")
|
||||
lg.Debugf("EndpointSlice not found")
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
return reconcile.Result{}, fmt.Errorf("failed to get EndpointSlice: %w", err)
|
||||
}
|
||||
if !eps.DeletionTimestamp.IsZero() {
|
||||
l.Debugf("EnpointSlice is being deleted")
|
||||
lg.Debugf("EnpointSlice is being deleted")
|
||||
return res, nil
|
||||
}
|
||||
|
||||
@ -64,7 +64,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ
|
||||
}
|
||||
err = er.Get(ctx, client.ObjectKeyFromObject(svc), svc)
|
||||
if apierrors.IsNotFound(err) {
|
||||
l.Infof("ExternalName Service %s/%s not found, perhaps it was deleted", svc.Namespace, svc.Name)
|
||||
lg.Infof("ExternalName Service %s/%s not found, perhaps it was deleted", svc.Namespace, svc.Name)
|
||||
return res, nil
|
||||
}
|
||||
if err != nil {
|
||||
@ -77,7 +77,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ
|
||||
|
||||
oldEps := eps.DeepCopy()
|
||||
tailnetSvc := tailnetSvcName(svc)
|
||||
l = l.With("tailnet-service-name", tailnetSvc)
|
||||
lg = lg.With("tailnet-service-name", tailnetSvc)
|
||||
|
||||
// Retrieve the desired tailnet service configuration from the ConfigMap.
|
||||
proxyGroupName := eps.Labels[labelProxyGroup]
|
||||
@ -88,12 +88,12 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ
|
||||
if cfgs == nil {
|
||||
// TODO(irbekrm): this path would be hit if egress service was once exposed on a ProxyGroup that later
|
||||
// got deleted. Probably the EndpointSlices then need to be deleted too- need to rethink this flow.
|
||||
l.Debugf("No egress config found, likely because ProxyGroup has not been created")
|
||||
lg.Debugf("No egress config found, likely because ProxyGroup has not been created")
|
||||
return res, nil
|
||||
}
|
||||
cfg, ok := (*cfgs)[tailnetSvc]
|
||||
if !ok {
|
||||
l.Infof("[unexpected] configuration for tailnet service %s not found", tailnetSvc)
|
||||
lg.Infof("[unexpected] configuration for tailnet service %s not found", tailnetSvc)
|
||||
return res, nil
|
||||
}
|
||||
|
||||
@ -105,7 +105,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ
|
||||
}
|
||||
newEndpoints := make([]discoveryv1.Endpoint, 0)
|
||||
for _, pod := range podList.Items {
|
||||
ready, err := er.podIsReadyToRouteTraffic(ctx, pod, &cfg, tailnetSvc, l)
|
||||
ready, err := er.podIsReadyToRouteTraffic(ctx, pod, &cfg, tailnetSvc, lg)
|
||||
if err != nil {
|
||||
return res, fmt.Errorf("error verifying if Pod is ready to route traffic: %w", err)
|
||||
}
|
||||
@ -130,7 +130,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ
|
||||
// run a cleanup for deleted Pods etc.
|
||||
eps.Endpoints = newEndpoints
|
||||
if !reflect.DeepEqual(eps, oldEps) {
|
||||
l.Infof("Updating EndpointSlice to ensure traffic is routed to ready proxy Pods")
|
||||
lg.Infof("Updating EndpointSlice to ensure traffic is routed to ready proxy Pods")
|
||||
if err := er.Update(ctx, eps); err != nil {
|
||||
return res, fmt.Errorf("error updating EndpointSlice: %w", err)
|
||||
}
|
||||
@ -154,11 +154,11 @@ func podIPv4(pod *corev1.Pod) (string, error) {
|
||||
// podIsReadyToRouteTraffic returns true if it appears that the proxy Pod has configured firewall rules to be able to
|
||||
// route traffic to the given tailnet service. It retrieves the proxy's state Secret and compares the tailnet service
|
||||
// status written there to the desired service configuration.
|
||||
func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod corev1.Pod, cfg *egressservices.Config, tailnetSvcName string, l *zap.SugaredLogger) (bool, error) {
|
||||
l = l.With("proxy_pod", pod.Name)
|
||||
l.Debugf("checking whether proxy is ready to route to egress service")
|
||||
func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod corev1.Pod, cfg *egressservices.Config, tailnetSvcName string, lg *zap.SugaredLogger) (bool, error) {
|
||||
lg = lg.With("proxy_pod", pod.Name)
|
||||
lg.Debugf("checking whether proxy is ready to route to egress service")
|
||||
if !pod.DeletionTimestamp.IsZero() {
|
||||
l.Debugf("proxy Pod is being deleted, ignore")
|
||||
lg.Debugf("proxy Pod is being deleted, ignore")
|
||||
return false, nil
|
||||
}
|
||||
podIP, err := podIPv4(&pod)
|
||||
@ -166,7 +166,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod
|
||||
return false, fmt.Errorf("error determining Pod IP address: %v", err)
|
||||
}
|
||||
if podIP == "" {
|
||||
l.Infof("[unexpected] Pod does not have an IPv4 address, and IPv6 is not currently supported")
|
||||
lg.Infof("[unexpected] Pod does not have an IPv4 address, and IPv6 is not currently supported")
|
||||
return false, nil
|
||||
}
|
||||
stateS := &corev1.Secret{
|
||||
@ -177,7 +177,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod
|
||||
}
|
||||
err = er.Get(ctx, client.ObjectKeyFromObject(stateS), stateS)
|
||||
if apierrors.IsNotFound(err) {
|
||||
l.Debugf("proxy does not have a state Secret, waiting...")
|
||||
lg.Debugf("proxy does not have a state Secret, waiting...")
|
||||
return false, nil
|
||||
}
|
||||
if err != nil {
|
||||
@ -185,7 +185,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod
|
||||
}
|
||||
svcStatusBS := stateS.Data[egressservices.KeyEgressServices]
|
||||
if len(svcStatusBS) == 0 {
|
||||
l.Debugf("proxy's state Secret does not contain egress services status, waiting...")
|
||||
lg.Debugf("proxy's state Secret does not contain egress services status, waiting...")
|
||||
return false, nil
|
||||
}
|
||||
svcStatus := &egressservices.Status{}
|
||||
@ -193,22 +193,22 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod
|
||||
return false, fmt.Errorf("error unmarshalling egress service status: %w", err)
|
||||
}
|
||||
if !strings.EqualFold(podIP, svcStatus.PodIPv4) {
|
||||
l.Infof("proxy's egress service status is for Pod IP %s, current proxy's Pod IP %s, waiting for the proxy to reconfigure...", svcStatus.PodIPv4, podIP)
|
||||
lg.Infof("proxy's egress service status is for Pod IP %s, current proxy's Pod IP %s, waiting for the proxy to reconfigure...", svcStatus.PodIPv4, podIP)
|
||||
return false, nil
|
||||
}
|
||||
st, ok := (*svcStatus).Services[tailnetSvcName]
|
||||
if !ok {
|
||||
l.Infof("proxy's state Secret does not have egress service status, waiting...")
|
||||
lg.Infof("proxy's state Secret does not have egress service status, waiting...")
|
||||
return false, nil
|
||||
}
|
||||
if !reflect.DeepEqual(cfg.TailnetTarget, st.TailnetTarget) {
|
||||
l.Infof("proxy has configured egress service for tailnet target %v, current target is %v, waiting for proxy to reconfigure...", st.TailnetTarget, cfg.TailnetTarget)
|
||||
lg.Infof("proxy has configured egress service for tailnet target %v, current target is %v, waiting for proxy to reconfigure...", st.TailnetTarget, cfg.TailnetTarget)
|
||||
return false, nil
|
||||
}
|
||||
if !reflect.DeepEqual(cfg.Ports, st.Ports) {
|
||||
l.Debugf("proxy has configured egress service for ports %#+v, wants ports %#+v, waiting for proxy to reconfigure", st.Ports, cfg.Ports)
|
||||
lg.Debugf("proxy has configured egress service for ports %#+v, wants ports %#+v, waiting for proxy to reconfigure", st.Ports, cfg.Ports)
|
||||
return false, nil
|
||||
}
|
||||
l.Debugf("proxy is ready to route traffic to egress service")
|
||||
lg.Debugf("proxy is ready to route traffic to egress service")
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@ -71,9 +71,9 @@ type egressPodsReconciler struct {
|
||||
// If the Pod does not appear to be serving the health check endpoint (pre-v1.80 proxies), the reconciler just sets the
|
||||
// readiness condition for backwards compatibility reasons.
|
||||
func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
|
||||
l := er.logger.With("Pod", req.NamespacedName)
|
||||
l.Debugf("starting reconcile")
|
||||
defer l.Debugf("reconcile finished")
|
||||
lg := er.logger.With("Pod", req.NamespacedName)
|
||||
lg.Debugf("starting reconcile")
|
||||
defer lg.Debugf("reconcile finished")
|
||||
|
||||
pod := new(corev1.Pod)
|
||||
err = er.Get(ctx, req.NamespacedName, pod)
|
||||
@ -84,11 +84,11 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
|
||||
return reconcile.Result{}, fmt.Errorf("failed to get Pod: %w", err)
|
||||
}
|
||||
if !pod.DeletionTimestamp.IsZero() {
|
||||
l.Debugf("Pod is being deleted, do nothing")
|
||||
lg.Debugf("Pod is being deleted, do nothing")
|
||||
return res, nil
|
||||
}
|
||||
if pod.Labels[LabelParentType] != proxyTypeProxyGroup {
|
||||
l.Infof("[unexpected] reconciler called for a Pod that is not a ProxyGroup Pod")
|
||||
lg.Infof("[unexpected] reconciler called for a Pod that is not a ProxyGroup Pod")
|
||||
return res, nil
|
||||
}
|
||||
|
||||
@ -97,7 +97,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
|
||||
if !slices.ContainsFunc(pod.Spec.ReadinessGates, func(r corev1.PodReadinessGate) bool {
|
||||
return r.ConditionType == tsEgressReadinessGate
|
||||
}) {
|
||||
l.Debug("Pod does not have egress readiness gate set, skipping")
|
||||
lg.Debug("Pod does not have egress readiness gate set, skipping")
|
||||
return res, nil
|
||||
}
|
||||
|
||||
@ -107,7 +107,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
|
||||
return res, fmt.Errorf("error getting ProxyGroup %q: %w", proxyGroupName, err)
|
||||
}
|
||||
if pg.Spec.Type != typeEgress {
|
||||
l.Infof("[unexpected] reconciler called for %q ProxyGroup Pod", pg.Spec.Type)
|
||||
lg.Infof("[unexpected] reconciler called for %q ProxyGroup Pod", pg.Spec.Type)
|
||||
return res, nil
|
||||
}
|
||||
// Get all ClusterIP Services for all egress targets exposed to cluster via this ProxyGroup.
|
||||
@ -125,7 +125,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
|
||||
return c.Type == tsEgressReadinessGate
|
||||
})
|
||||
if idx != -1 {
|
||||
l.Debugf("Pod is already ready, do nothing")
|
||||
lg.Debugf("Pod is already ready, do nothing")
|
||||
return res, nil
|
||||
}
|
||||
|
||||
@ -134,7 +134,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
|
||||
for _, svc := range svcs.Items {
|
||||
s := svc
|
||||
go func() {
|
||||
ll := l.With("service_name", s.Name)
|
||||
ll := lg.With("service_name", s.Name)
|
||||
d := retrieveClusterDomain(er.tsNamespace, ll)
|
||||
healthCheckAddr := healthCheckForSvc(&s, d)
|
||||
if healthCheckAddr == "" {
|
||||
@ -178,22 +178,22 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req
|
||||
return res, fmt.Errorf("error verifying conectivity: %w", err)
|
||||
}
|
||||
if rm := routesMissing.Load(); rm {
|
||||
l.Info("Pod is not yet added as an endpoint for all egress targets, waiting...")
|
||||
lg.Info("Pod is not yet added as an endpoint for all egress targets, waiting...")
|
||||
return reconcile.Result{RequeueAfter: shortRequeue}, nil
|
||||
}
|
||||
if err := er.setPodReady(ctx, pod, l); err != nil {
|
||||
if err := er.setPodReady(ctx, pod, lg); err != nil {
|
||||
return res, fmt.Errorf("error setting Pod as ready: %w", err)
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (er *egressPodsReconciler) setPodReady(ctx context.Context, pod *corev1.Pod, l *zap.SugaredLogger) error {
|
||||
func (er *egressPodsReconciler) setPodReady(ctx context.Context, pod *corev1.Pod, lg *zap.SugaredLogger) error {
|
||||
if slices.ContainsFunc(pod.Status.Conditions, func(c corev1.PodCondition) bool {
|
||||
return c.Type == tsEgressReadinessGate
|
||||
}) {
|
||||
return nil
|
||||
}
|
||||
l.Infof("Pod is ready to route traffic to all egress targets")
|
||||
lg.Infof("Pod is ready to route traffic to all egress targets")
|
||||
pod.Status.Conditions = append(pod.Status.Conditions, corev1.PodCondition{
|
||||
Type: tsEgressReadinessGate,
|
||||
Status: corev1.ConditionTrue,
|
||||
@ -216,11 +216,11 @@ const (
|
||||
)
|
||||
|
||||
// lookupPodRouteViaSvc attempts to reach a Pod using a health check endpoint served by a Service and returns the state of the health check.
|
||||
func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *corev1.Pod, healthCheckAddr string, l *zap.SugaredLogger) (healthCheckState, error) {
|
||||
func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *corev1.Pod, healthCheckAddr string, lg *zap.SugaredLogger) (healthCheckState, error) {
|
||||
if !slices.ContainsFunc(pod.Spec.Containers[0].Env, func(e corev1.EnvVar) bool {
|
||||
return e.Name == "TS_ENABLE_HEALTH_CHECK" && e.Value == "true"
|
||||
}) {
|
||||
l.Debugf("Pod does not have health check enabled, unable to verify if it is currently routable via Service")
|
||||
lg.Debugf("Pod does not have health check enabled, unable to verify if it is currently routable via Service")
|
||||
return cannotVerify, nil
|
||||
}
|
||||
wantsIP, err := podIPv4(pod)
|
||||
@ -248,7 +248,7 @@ func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *c
|
||||
defer resp.Body.Close()
|
||||
gotIP := resp.Header.Get(kubetypes.PodIPv4Header)
|
||||
if gotIP == "" {
|
||||
l.Debugf("Health check does not return Pod's IP header, unable to verify if Pod is currently routable via Service")
|
||||
lg.Debugf("Health check does not return Pod's IP header, unable to verify if Pod is currently routable via Service")
|
||||
return cannotVerify, nil
|
||||
}
|
||||
if !strings.EqualFold(wantsIP, gotIP) {
|
||||
|
||||
@ -47,13 +47,13 @@ type egressSvcsReadinessReconciler struct {
|
||||
// route traffic to the target. It compares proxy Pod IPs with the endpoints set on the EndpointSlice for the egress
|
||||
// service to determine how many replicas are currently able to route traffic.
|
||||
func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
|
||||
l := esrr.logger.With("Service", req.NamespacedName)
|
||||
l.Debugf("starting reconcile")
|
||||
defer l.Debugf("reconcile finished")
|
||||
lg := esrr.logger.With("Service", req.NamespacedName)
|
||||
lg.Debugf("starting reconcile")
|
||||
defer lg.Debugf("reconcile finished")
|
||||
|
||||
svc := new(corev1.Service)
|
||||
if err = esrr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) {
|
||||
l.Debugf("Service not found")
|
||||
lg.Debugf("Service not found")
|
||||
return res, nil
|
||||
} else if err != nil {
|
||||
return res, fmt.Errorf("failed to get Service: %w", err)
|
||||
@ -64,7 +64,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
|
||||
)
|
||||
oldStatus := svc.Status.DeepCopy()
|
||||
defer func() {
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, l)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, lg)
|
||||
if !apiequality.Semantic.DeepEqual(oldStatus, &svc.Status) {
|
||||
err = errors.Join(err, esrr.Status().Update(ctx, svc))
|
||||
}
|
||||
@ -79,7 +79,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
|
||||
return res, err
|
||||
}
|
||||
if eps == nil {
|
||||
l.Infof("EndpointSlice for Service does not yet exist, waiting...")
|
||||
lg.Infof("EndpointSlice for Service does not yet exist, waiting...")
|
||||
reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady
|
||||
st = metav1.ConditionFalse
|
||||
return res, nil
|
||||
@ -91,7 +91,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
|
||||
}
|
||||
err = esrr.Get(ctx, client.ObjectKeyFromObject(pg), pg)
|
||||
if apierrors.IsNotFound(err) {
|
||||
l.Infof("ProxyGroup for Service does not exist, waiting...")
|
||||
lg.Infof("ProxyGroup for Service does not exist, waiting...")
|
||||
reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady
|
||||
st = metav1.ConditionFalse
|
||||
return res, nil
|
||||
@ -103,7 +103,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
|
||||
return res, err
|
||||
}
|
||||
if !tsoperator.ProxyGroupAvailable(pg) {
|
||||
l.Infof("ProxyGroup for Service is not ready, waiting...")
|
||||
lg.Infof("ProxyGroup for Service is not ready, waiting...")
|
||||
reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady
|
||||
st = metav1.ConditionFalse
|
||||
return res, nil
|
||||
@ -111,7 +111,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
|
||||
|
||||
replicas := pgReplicas(pg)
|
||||
if replicas == 0 {
|
||||
l.Infof("ProxyGroup replicas set to 0")
|
||||
lg.Infof("ProxyGroup replicas set to 0")
|
||||
reason, msg = reasonNoProxies, reasonNoProxies
|
||||
st = metav1.ConditionFalse
|
||||
return res, nil
|
||||
@ -128,16 +128,16 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
|
||||
return res, err
|
||||
}
|
||||
if pod == nil {
|
||||
l.Warnf("[unexpected] ProxyGroup is ready, but replica %d was not found", i)
|
||||
lg.Warnf("[unexpected] ProxyGroup is ready, but replica %d was not found", i)
|
||||
reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady
|
||||
return res, nil
|
||||
}
|
||||
l.Debugf("looking at Pod with IPs %v", pod.Status.PodIPs)
|
||||
lg.Debugf("looking at Pod with IPs %v", pod.Status.PodIPs)
|
||||
ready := false
|
||||
for _, ep := range eps.Endpoints {
|
||||
l.Debugf("looking at endpoint with addresses %v", ep.Addresses)
|
||||
if endpointReadyForPod(&ep, pod, l) {
|
||||
l.Debugf("endpoint is ready for Pod")
|
||||
lg.Debugf("looking at endpoint with addresses %v", ep.Addresses)
|
||||
if endpointReadyForPod(&ep, pod, lg) {
|
||||
lg.Debugf("endpoint is ready for Pod")
|
||||
ready = true
|
||||
break
|
||||
}
|
||||
@ -163,10 +163,10 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
|
||||
|
||||
// endpointReadyForPod returns true if the endpoint is for the Pod's IPv4 address and is ready to serve traffic.
|
||||
// Endpoint must not be nil.
|
||||
func endpointReadyForPod(ep *discoveryv1.Endpoint, pod *corev1.Pod, l *zap.SugaredLogger) bool {
|
||||
func endpointReadyForPod(ep *discoveryv1.Endpoint, pod *corev1.Pod, lg *zap.SugaredLogger) bool {
|
||||
podIP, err := podIPv4(pod)
|
||||
if err != nil {
|
||||
l.Warnf("[unexpected] error retrieving Pod's IPv4 address: %v", err)
|
||||
lg.Warnf("[unexpected] error retrieving Pod's IPv4 address: %v", err)
|
||||
return false
|
||||
}
|
||||
// Currently we only ever set a single address on and Endpoint and nothing else is meant to modify this.
|
||||
|
||||
@ -49,12 +49,12 @@ func TestEgressServiceReadiness(t *testing.T) {
|
||||
},
|
||||
}
|
||||
fakeClusterIPSvc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "my-app", Namespace: "operator-ns"}}
|
||||
l := egressSvcEpsLabels(egressSvc, fakeClusterIPSvc)
|
||||
labels := egressSvcEpsLabels(egressSvc, fakeClusterIPSvc)
|
||||
eps := &discoveryv1.EndpointSlice{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-app",
|
||||
Namespace: "operator-ns",
|
||||
Labels: l,
|
||||
Labels: labels,
|
||||
},
|
||||
AddressType: discoveryv1.AddressTypeIPv4,
|
||||
}
|
||||
@ -118,26 +118,26 @@ func TestEgressServiceReadiness(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func setClusterNotReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger) {
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonClusterResourcesNotReady, reasonClusterResourcesNotReady, cl, l)
|
||||
func setClusterNotReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger) {
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonClusterResourcesNotReady, reasonClusterResourcesNotReady, cl, lg)
|
||||
}
|
||||
|
||||
func setNotReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replicas int32) {
|
||||
func setNotReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger, replicas int32) {
|
||||
msg := fmt.Sprintf(msgReadyToRouteTemplate, 0, replicas)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonNotReady, msg, cl, l)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonNotReady, msg, cl, lg)
|
||||
}
|
||||
|
||||
func setReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replicas, readyReplicas int32) {
|
||||
func setReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger, replicas, readyReplicas int32) {
|
||||
reason := reasonPartiallyReady
|
||||
if readyReplicas == replicas {
|
||||
reason = reasonReady
|
||||
}
|
||||
msg := fmt.Sprintf(msgReadyToRouteTemplate, readyReplicas, replicas)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionTrue, reason, msg, cl, l)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionTrue, reason, msg, cl, lg)
|
||||
}
|
||||
|
||||
func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, l *zap.SugaredLogger) {
|
||||
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, l)
|
||||
func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, lg *zap.SugaredLogger) {
|
||||
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, lg)
|
||||
}
|
||||
|
||||
func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1.EndpointSlice) {
|
||||
@ -153,14 +153,14 @@ func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1
|
||||
}
|
||||
|
||||
func pod(pg *tsapi.ProxyGroup, ordinal int32) *corev1.Pod {
|
||||
l := pgLabels(pg.Name, nil)
|
||||
l[appsv1.PodIndexLabel] = fmt.Sprintf("%d", ordinal)
|
||||
labels := pgLabels(pg.Name, nil)
|
||||
labels[appsv1.PodIndexLabel] = fmt.Sprintf("%d", ordinal)
|
||||
ip := fmt.Sprintf("10.0.0.%d", ordinal)
|
||||
return &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%s-%d", pg.Name, ordinal),
|
||||
Namespace: "operator-ns",
|
||||
Labels: l,
|
||||
Labels: labels,
|
||||
},
|
||||
Status: corev1.PodStatus{
|
||||
PodIPs: []corev1.PodIP{{IP: ip}},
|
||||
|
||||
@ -98,12 +98,12 @@ type egressSvcsReconciler struct {
|
||||
// - updates the egress service config in a ConfigMap mounted to the ProxyGroup proxies with the tailnet target and the
|
||||
// portmappings.
|
||||
func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
|
||||
l := esr.logger.With("Service", req.NamespacedName)
|
||||
defer l.Info("reconcile finished")
|
||||
lg := esr.logger.With("Service", req.NamespacedName)
|
||||
defer lg.Info("reconcile finished")
|
||||
|
||||
svc := new(corev1.Service)
|
||||
if err = esr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) {
|
||||
l.Info("Service not found")
|
||||
lg.Info("Service not found")
|
||||
return res, nil
|
||||
} else if err != nil {
|
||||
return res, fmt.Errorf("failed to get Service: %w", err)
|
||||
@ -111,7 +111,7 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
|
||||
|
||||
// Name of the 'egress service', meaning the tailnet target.
|
||||
tailnetSvc := tailnetSvcName(svc)
|
||||
l = l.With("tailnet-service", tailnetSvc)
|
||||
lg = lg.With("tailnet-service", tailnetSvc)
|
||||
|
||||
// Note that resources for egress Services are only cleaned up when the
|
||||
// Service is actually deleted (and not if, for example, user decides to
|
||||
@ -119,8 +119,8 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
|
||||
// assume that the egress ExternalName Services are always created for
|
||||
// Tailscale operator specifically.
|
||||
if !svc.DeletionTimestamp.IsZero() {
|
||||
l.Info("Service is being deleted, ensuring resource cleanup")
|
||||
return res, esr.maybeCleanup(ctx, svc, l)
|
||||
lg.Info("Service is being deleted, ensuring resource cleanup")
|
||||
return res, esr.maybeCleanup(ctx, svc, lg)
|
||||
}
|
||||
|
||||
oldStatus := svc.Status.DeepCopy()
|
||||
@ -131,7 +131,7 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
|
||||
}()
|
||||
|
||||
// Validate the user-created ExternalName Service and the associated ProxyGroup.
|
||||
if ok, err := esr.validateClusterResources(ctx, svc, l); err != nil {
|
||||
if ok, err := esr.validateClusterResources(ctx, svc, lg); err != nil {
|
||||
return res, fmt.Errorf("error validating cluster resources: %w", err)
|
||||
} else if !ok {
|
||||
return res, nil
|
||||
@ -141,8 +141,8 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
|
||||
svc.Finalizers = append(svc.Finalizers, FinalizerName)
|
||||
if err := esr.updateSvcSpec(ctx, svc); err != nil {
|
||||
err := fmt.Errorf("failed to add finalizer: %w", err)
|
||||
r := svcConfiguredReason(svc, false, l)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, l)
|
||||
r := svcConfiguredReason(svc, false, lg)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, lg)
|
||||
return res, err
|
||||
}
|
||||
esr.mu.Lock()
|
||||
@ -151,16 +151,16 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
|
||||
esr.mu.Unlock()
|
||||
}
|
||||
|
||||
if err := esr.maybeCleanupProxyGroupConfig(ctx, svc, l); err != nil {
|
||||
if err := esr.maybeCleanupProxyGroupConfig(ctx, svc, lg); err != nil {
|
||||
err = fmt.Errorf("cleaning up resources for previous ProxyGroup failed: %w", err)
|
||||
r := svcConfiguredReason(svc, false, l)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, l)
|
||||
r := svcConfiguredReason(svc, false, lg)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, lg)
|
||||
return res, err
|
||||
}
|
||||
|
||||
if err := esr.maybeProvision(ctx, svc, l); err != nil {
|
||||
if err := esr.maybeProvision(ctx, svc, lg); err != nil {
|
||||
if strings.Contains(err.Error(), optimisticLockErrorMsg) {
|
||||
l.Infof("optimistic lock error, retrying: %s", err)
|
||||
lg.Infof("optimistic lock error, retrying: %s", err)
|
||||
} else {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@ -169,15 +169,15 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (err error) {
|
||||
r := svcConfiguredReason(svc, false, l)
|
||||
func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) (err error) {
|
||||
r := svcConfiguredReason(svc, false, lg)
|
||||
st := metav1.ConditionFalse
|
||||
defer func() {
|
||||
msg := r
|
||||
if st != metav1.ConditionTrue && err != nil {
|
||||
msg = err.Error()
|
||||
}
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, st, r, msg, esr.clock, l)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, st, r, msg, esr.clock, lg)
|
||||
}()
|
||||
|
||||
crl := egressSvcChildResourceLabels(svc)
|
||||
@ -189,36 +189,36 @@ func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1
|
||||
if clusterIPSvc == nil {
|
||||
clusterIPSvc = esr.clusterIPSvcForEgress(crl)
|
||||
}
|
||||
upToDate := svcConfigurationUpToDate(svc, l)
|
||||
upToDate := svcConfigurationUpToDate(svc, lg)
|
||||
provisioned := true
|
||||
if !upToDate {
|
||||
if clusterIPSvc, provisioned, err = esr.provision(ctx, svc.Annotations[AnnotationProxyGroup], svc, clusterIPSvc, l); err != nil {
|
||||
if clusterIPSvc, provisioned, err = esr.provision(ctx, svc.Annotations[AnnotationProxyGroup], svc, clusterIPSvc, lg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !provisioned {
|
||||
l.Infof("unable to provision cluster resources")
|
||||
lg.Infof("unable to provision cluster resources")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update ExternalName Service to point at the ClusterIP Service.
|
||||
clusterDomain := retrieveClusterDomain(esr.tsNamespace, l)
|
||||
clusterDomain := retrieveClusterDomain(esr.tsNamespace, lg)
|
||||
clusterIPSvcFQDN := fmt.Sprintf("%s.%s.svc.%s", clusterIPSvc.Name, clusterIPSvc.Namespace, clusterDomain)
|
||||
if svc.Spec.ExternalName != clusterIPSvcFQDN {
|
||||
l.Infof("Configuring ExternalName Service to point to ClusterIP Service %s", clusterIPSvcFQDN)
|
||||
lg.Infof("Configuring ExternalName Service to point to ClusterIP Service %s", clusterIPSvcFQDN)
|
||||
svc.Spec.ExternalName = clusterIPSvcFQDN
|
||||
if err = esr.updateSvcSpec(ctx, svc); err != nil {
|
||||
err = fmt.Errorf("error updating ExternalName Service: %w", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
r = svcConfiguredReason(svc, true, l)
|
||||
r = svcConfiguredReason(svc, true, lg)
|
||||
st = metav1.ConditionTrue
|
||||
return nil
|
||||
}
|
||||
|
||||
func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName string, svc, clusterIPSvc *corev1.Service, l *zap.SugaredLogger) (*corev1.Service, bool, error) {
|
||||
l.Infof("updating configuration...")
|
||||
func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName string, svc, clusterIPSvc *corev1.Service, lg *zap.SugaredLogger) (*corev1.Service, bool, error) {
|
||||
lg.Infof("updating configuration...")
|
||||
usedPorts, err := esr.usedPortsForPG(ctx, proxyGroupName)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("error calculating used ports for ProxyGroup %s: %w", proxyGroupName, err)
|
||||
@ -246,7 +246,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
l.Debugf("portmapping %s:%d -> %s:%d is no longer required, removing", pm.Protocol, pm.TargetPort.IntVal, pm.Protocol, pm.Port)
|
||||
lg.Debugf("portmapping %s:%d -> %s:%d is no longer required, removing", pm.Protocol, pm.TargetPort.IntVal, pm.Protocol, pm.Port)
|
||||
clusterIPSvc.Spec.Ports = slices.Delete(clusterIPSvc.Spec.Ports, i, i+1)
|
||||
}
|
||||
}
|
||||
@ -277,7 +277,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
|
||||
return nil, false, fmt.Errorf("unable to allocate additional ports on ProxyGroup %s, %d ports already used. Create another ProxyGroup or open an issue if you believe this is unexpected.", proxyGroupName, maxPorts)
|
||||
}
|
||||
p := unusedPort(usedPorts)
|
||||
l.Debugf("mapping tailnet target port %d to container port %d", wantsPM.Port, p)
|
||||
lg.Debugf("mapping tailnet target port %d to container port %d", wantsPM.Port, p)
|
||||
usedPorts.Insert(p)
|
||||
clusterIPSvc.Spec.Ports = append(clusterIPSvc.Spec.Ports, corev1.ServicePort{
|
||||
Name: wantsPM.Name,
|
||||
@ -343,14 +343,14 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
|
||||
return nil, false, fmt.Errorf("error retrieving egress services configuration: %w", err)
|
||||
}
|
||||
if cm == nil {
|
||||
l.Info("ConfigMap not yet created, waiting..")
|
||||
lg.Info("ConfigMap not yet created, waiting..")
|
||||
return nil, false, nil
|
||||
}
|
||||
tailnetSvc := tailnetSvcName(svc)
|
||||
gotCfg := (*cfgs)[tailnetSvc]
|
||||
wantsCfg := egressSvcCfg(svc, clusterIPSvc, esr.tsNamespace, l)
|
||||
wantsCfg := egressSvcCfg(svc, clusterIPSvc, esr.tsNamespace, lg)
|
||||
if !reflect.DeepEqual(gotCfg, wantsCfg) {
|
||||
l.Debugf("updating egress services ConfigMap %s", cm.Name)
|
||||
lg.Debugf("updating egress services ConfigMap %s", cm.Name)
|
||||
mak.Set(cfgs, tailnetSvc, wantsCfg)
|
||||
bs, err := json.Marshal(cfgs)
|
||||
if err != nil {
|
||||
@ -361,7 +361,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
|
||||
return nil, false, fmt.Errorf("error updating egress services ConfigMap: %w", err)
|
||||
}
|
||||
}
|
||||
l.Infof("egress service configuration has been updated")
|
||||
lg.Infof("egress service configuration has been updated")
|
||||
return clusterIPSvc, true, nil
|
||||
}
|
||||
|
||||
@ -402,7 +402,7 @@ func (esr *egressSvcsReconciler) maybeCleanup(ctx context.Context, svc *corev1.S
|
||||
return nil
|
||||
}
|
||||
|
||||
func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) error {
|
||||
func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) error {
|
||||
wantsProxyGroup := svc.Annotations[AnnotationProxyGroup]
|
||||
cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured)
|
||||
if cond == nil {
|
||||
@ -416,7 +416,7 @@ func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Contex
|
||||
return nil
|
||||
}
|
||||
esr.logger.Infof("egress Service configured on ProxyGroup %s, wants ProxyGroup %s, cleaning up...", ss[2], wantsProxyGroup)
|
||||
if err := esr.ensureEgressSvcCfgDeleted(ctx, svc, l); err != nil {
|
||||
if err := esr.ensureEgressSvcCfgDeleted(ctx, svc, lg); err != nil {
|
||||
return fmt.Errorf("error deleting egress service config: %w", err)
|
||||
}
|
||||
return nil
|
||||
@ -471,17 +471,17 @@ func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context,
|
||||
Namespace: esr.tsNamespace,
|
||||
},
|
||||
}
|
||||
l := logger.With("ConfigMap", client.ObjectKeyFromObject(cm))
|
||||
l.Debug("ensuring that egress service configuration is removed from proxy config")
|
||||
lggr := logger.With("ConfigMap", client.ObjectKeyFromObject(cm))
|
||||
lggr.Debug("ensuring that egress service configuration is removed from proxy config")
|
||||
if err := esr.Get(ctx, client.ObjectKeyFromObject(cm), cm); apierrors.IsNotFound(err) {
|
||||
l.Debugf("ConfigMap not found")
|
||||
lggr.Debugf("ConfigMap not found")
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("error retrieving ConfigMap: %w", err)
|
||||
}
|
||||
bs := cm.BinaryData[egressservices.KeyEgressServices]
|
||||
if len(bs) == 0 {
|
||||
l.Debugf("ConfigMap does not contain egress service configs")
|
||||
lggr.Debugf("ConfigMap does not contain egress service configs")
|
||||
return nil
|
||||
}
|
||||
cfgs := &egressservices.Configs{}
|
||||
@ -491,12 +491,12 @@ func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context,
|
||||
tailnetSvc := tailnetSvcName(svc)
|
||||
_, ok := (*cfgs)[tailnetSvc]
|
||||
if !ok {
|
||||
l.Debugf("ConfigMap does not contain egress service config, likely because it was already deleted")
|
||||
lggr.Debugf("ConfigMap does not contain egress service config, likely because it was already deleted")
|
||||
return nil
|
||||
}
|
||||
l.Infof("before deleting config %+#v", *cfgs)
|
||||
lggr.Infof("before deleting config %+#v", *cfgs)
|
||||
delete(*cfgs, tailnetSvc)
|
||||
l.Infof("after deleting config %+#v", *cfgs)
|
||||
lggr.Infof("after deleting config %+#v", *cfgs)
|
||||
bs, err := json.Marshal(cfgs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error marshalling egress services configs: %w", err)
|
||||
@ -505,7 +505,7 @@ func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context,
|
||||
return esr.Update(ctx, cm)
|
||||
}
|
||||
|
||||
func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (bool, error) {
|
||||
func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) (bool, error) {
|
||||
proxyGroupName := svc.Annotations[AnnotationProxyGroup]
|
||||
pg := &tsapi.ProxyGroup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -513,36 +513,36 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s
|
||||
},
|
||||
}
|
||||
if err := esr.Get(ctx, client.ObjectKeyFromObject(pg), pg); apierrors.IsNotFound(err) {
|
||||
l.Infof("ProxyGroup %q not found, waiting...", proxyGroupName)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l)
|
||||
lg.Infof("ProxyGroup %q not found, waiting...", proxyGroupName)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, lg)
|
||||
tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured)
|
||||
return false, nil
|
||||
} else if err != nil {
|
||||
err := fmt.Errorf("unable to retrieve ProxyGroup %s: %w", proxyGroupName, err)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, err.Error(), esr.clock, l)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, err.Error(), esr.clock, lg)
|
||||
tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured)
|
||||
return false, err
|
||||
}
|
||||
if violations := validateEgressService(svc, pg); len(violations) > 0 {
|
||||
msg := fmt.Sprintf("invalid egress Service: %s", strings.Join(violations, ", "))
|
||||
esr.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVICE", msg)
|
||||
l.Info(msg)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionFalse, reasonEgressSvcInvalid, msg, esr.clock, l)
|
||||
lg.Info(msg)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionFalse, reasonEgressSvcInvalid, msg, esr.clock, lg)
|
||||
tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured)
|
||||
return false, nil
|
||||
}
|
||||
if !tsoperator.ProxyGroupAvailable(pg) {
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l)
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, lg)
|
||||
tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured)
|
||||
}
|
||||
|
||||
l.Debugf("egress service is valid")
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionTrue, reasonEgressSvcValid, reasonEgressSvcValid, esr.clock, l)
|
||||
lg.Debugf("egress service is valid")
|
||||
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionTrue, reasonEgressSvcValid, reasonEgressSvcValid, esr.clock, lg)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service, ns string, l *zap.SugaredLogger) egressservices.Config {
|
||||
d := retrieveClusterDomain(ns, l)
|
||||
func egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service, ns string, lg *zap.SugaredLogger) egressservices.Config {
|
||||
d := retrieveClusterDomain(ns, lg)
|
||||
tt := tailnetTargetFromSvc(externalNameSvc)
|
||||
hep := healthCheckForSvc(clusterIPSvc, d)
|
||||
cfg := egressservices.Config{
|
||||
@ -691,18 +691,18 @@ func egressSvcChildResourceLabels(svc *corev1.Service) map[string]string {
|
||||
|
||||
// egressEpsLabels returns labels to be added to an EndpointSlice created for an egress service.
|
||||
func egressSvcEpsLabels(extNSvc, clusterIPSvc *corev1.Service) map[string]string {
|
||||
l := egressSvcChildResourceLabels(extNSvc)
|
||||
lbels := egressSvcChildResourceLabels(extNSvc)
|
||||
// Adding this label is what makes kube proxy set up rules to route traffic sent to the clusterIP Service to the
|
||||
// endpoints defined on this EndpointSlice.
|
||||
// https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership
|
||||
l[discoveryv1.LabelServiceName] = clusterIPSvc.Name
|
||||
lbels[discoveryv1.LabelServiceName] = clusterIPSvc.Name
|
||||
// Kubernetes recommends setting this label.
|
||||
// https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#management
|
||||
l[discoveryv1.LabelManagedBy] = "tailscale.com"
|
||||
return l
|
||||
lbels[discoveryv1.LabelManagedBy] = "tailscale.com"
|
||||
return lbels
|
||||
}
|
||||
|
||||
func svcConfigurationUpToDate(svc *corev1.Service, l *zap.SugaredLogger) bool {
|
||||
func svcConfigurationUpToDate(svc *corev1.Service, lg *zap.SugaredLogger) bool {
|
||||
cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured)
|
||||
if cond == nil {
|
||||
return false
|
||||
@ -710,21 +710,21 @@ func svcConfigurationUpToDate(svc *corev1.Service, l *zap.SugaredLogger) bool {
|
||||
if cond.Status != metav1.ConditionTrue {
|
||||
return false
|
||||
}
|
||||
wantsReadyReason := svcConfiguredReason(svc, true, l)
|
||||
wantsReadyReason := svcConfiguredReason(svc, true, lg)
|
||||
return strings.EqualFold(wantsReadyReason, cond.Reason)
|
||||
}
|
||||
|
||||
func cfgHash(c cfg, l *zap.SugaredLogger) string {
|
||||
func cfgHash(c cfg, lg *zap.SugaredLogger) string {
|
||||
bs, err := json.Marshal(c)
|
||||
if err != nil {
|
||||
// Don't use l.Error as that messes up component logs with, in this case, unnecessary stack trace.
|
||||
l.Infof("error marhsalling Config: %v", err)
|
||||
lg.Infof("error marhsalling Config: %v", err)
|
||||
return ""
|
||||
}
|
||||
h := sha256.New()
|
||||
if _, err := h.Write(bs); err != nil {
|
||||
// Don't use l.Error as that messes up component logs with, in this case, unnecessary stack trace.
|
||||
l.Infof("error producing Config hash: %v", err)
|
||||
lg.Infof("error producing Config hash: %v", err)
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("%x", h.Sum(nil))
|
||||
@ -736,7 +736,7 @@ type cfg struct {
|
||||
ProxyGroup string `json:"proxyGroup"`
|
||||
}
|
||||
|
||||
func svcConfiguredReason(svc *corev1.Service, configured bool, l *zap.SugaredLogger) string {
|
||||
func svcConfiguredReason(svc *corev1.Service, configured bool, lg *zap.SugaredLogger) string {
|
||||
var r string
|
||||
if configured {
|
||||
r = "ConfiguredFor:"
|
||||
@ -750,7 +750,7 @@ func svcConfiguredReason(svc *corev1.Service, configured bool, l *zap.SugaredLog
|
||||
TailnetTarget: tt,
|
||||
ProxyGroup: svc.Annotations[AnnotationProxyGroup],
|
||||
}
|
||||
r += fmt.Sprintf(":Config:%s", cfgHash(s, l))
|
||||
r += fmt.Sprintf(":Config:%s", cfgHash(s, lg))
|
||||
return r
|
||||
}
|
||||
|
||||
|
||||
@ -249,9 +249,9 @@ func portsForEndpointSlice(svc *corev1.Service) []discoveryv1.EndpointPort {
|
||||
return ports
|
||||
}
|
||||
|
||||
func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap, l *zap.Logger) {
|
||||
func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap, lg *zap.Logger) {
|
||||
t.Helper()
|
||||
wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc, clusterIPSvc.Namespace, l.Sugar())
|
||||
wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc, clusterIPSvc.Namespace, lg.Sugar())
|
||||
if err := cl.Get(context.Background(), client.ObjectKeyFromObject(cm), cm); err != nil {
|
||||
t.Fatalf("Error retrieving ConfigMap: %v", err)
|
||||
}
|
||||
|
||||
@ -1282,8 +1282,8 @@ func TestServiceProxyClassAnnotation(t *testing.T) {
|
||||
slist := &corev1.SecretList{}
|
||||
fc.List(context.Background(), slist, client.InNamespace("operator-ns"))
|
||||
for _, i := range slist.Items {
|
||||
l, _ := json.Marshal(i.Labels)
|
||||
t.Logf("found secret %q with labels %q ", i.Name, string(l))
|
||||
labels, _ := json.Marshal(i.Labels)
|
||||
t.Logf("found secret %q with labels %q ", i.Name, string(labels))
|
||||
}
|
||||
|
||||
_, shortName := findGenName(t, fc, "default", "test", "svc")
|
||||
|
||||
@ -524,16 +524,16 @@ func pgSecretLabels(pgName, secretType string) map[string]string {
|
||||
}
|
||||
|
||||
func pgLabels(pgName string, customLabels map[string]string) map[string]string {
|
||||
l := make(map[string]string, len(customLabels)+3)
|
||||
labels := make(map[string]string, len(customLabels)+3)
|
||||
for k, v := range customLabels {
|
||||
l[k] = v
|
||||
labels[k] = v
|
||||
}
|
||||
|
||||
l[kubetypes.LabelManaged] = "true"
|
||||
l[LabelParentType] = "proxygroup"
|
||||
l[LabelParentName] = pgName
|
||||
labels[kubetypes.LabelManaged] = "true"
|
||||
labels[LabelParentType] = "proxygroup"
|
||||
labels[LabelParentName] = pgName
|
||||
|
||||
return l
|
||||
return labels
|
||||
}
|
||||
|
||||
func pgOwnerReference(owner *tsapi.ProxyGroup) []metav1.OwnerReference {
|
||||
|
||||
@ -281,17 +281,17 @@ func env(tsr *tsapi.Recorder, loginServer string) []corev1.EnvVar {
|
||||
}
|
||||
|
||||
func labels(app, instance string, customLabels map[string]string) map[string]string {
|
||||
l := make(map[string]string, len(customLabels)+3)
|
||||
labels := make(map[string]string, len(customLabels)+3)
|
||||
for k, v := range customLabels {
|
||||
l[k] = v
|
||||
labels[k] = v
|
||||
}
|
||||
|
||||
// ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/
|
||||
l["app.kubernetes.io/name"] = app
|
||||
l["app.kubernetes.io/instance"] = instance
|
||||
l["app.kubernetes.io/managed-by"] = "tailscale-operator"
|
||||
labels["app.kubernetes.io/name"] = app
|
||||
labels["app.kubernetes.io/instance"] = instance
|
||||
labels["app.kubernetes.io/managed-by"] = "tailscale-operator"
|
||||
|
||||
return l
|
||||
return labels
|
||||
}
|
||||
|
||||
func tsrOwnerReference(owner metav1.Object) []metav1.OwnerReference {
|
||||
|
||||
@ -50,32 +50,32 @@ func NewConfigLoader(logger *zap.SugaredLogger, client clientcorev1.CoreV1Interf
|
||||
}
|
||||
}
|
||||
|
||||
func (l *configLoader) WatchConfig(ctx context.Context, path string) error {
|
||||
func (ld *configLoader) WatchConfig(ctx context.Context, path string) error {
|
||||
secretNamespacedName, isKubeSecret := strings.CutPrefix(path, "kube:")
|
||||
if isKubeSecret {
|
||||
secretNamespace, secretName, ok := strings.Cut(secretNamespacedName, string(types.Separator))
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid Kubernetes Secret reference %q, expected format <namespace>/<name>", path)
|
||||
}
|
||||
if err := l.watchConfigSecretChanges(ctx, secretNamespace, secretName); err != nil && !errors.Is(err, context.Canceled) {
|
||||
if err := ld.watchConfigSecretChanges(ctx, secretNamespace, secretName); err != nil && !errors.Is(err, context.Canceled) {
|
||||
return fmt.Errorf("error watching config Secret %q: %w", secretNamespacedName, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := l.watchConfigFileChanges(ctx, path); err != nil && !errors.Is(err, context.Canceled) {
|
||||
if err := ld.watchConfigFileChanges(ctx, path); err != nil && !errors.Is(err, context.Canceled) {
|
||||
return fmt.Errorf("error watching config file %q: %w", path, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *configLoader) reloadConfig(ctx context.Context, raw []byte) error {
|
||||
if bytes.Equal(raw, l.previous) {
|
||||
if l.cfgIgnored != nil && testenv.InTest() {
|
||||
l.once.Do(func() {
|
||||
close(l.cfgIgnored)
|
||||
func (ld *configLoader) reloadConfig(ctx context.Context, raw []byte) error {
|
||||
if bytes.Equal(raw, ld.previous) {
|
||||
if ld.cfgIgnored != nil && testenv.InTest() {
|
||||
ld.once.Do(func() {
|
||||
close(ld.cfgIgnored)
|
||||
})
|
||||
}
|
||||
return nil
|
||||
@ -89,14 +89,14 @@ func (l *configLoader) reloadConfig(ctx context.Context, raw []byte) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case l.cfgChan <- &cfg:
|
||||
case ld.cfgChan <- &cfg:
|
||||
}
|
||||
|
||||
l.previous = raw
|
||||
ld.previous = raw
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string) error {
|
||||
func (ld *configLoader) watchConfigFileChanges(ctx context.Context, path string) error {
|
||||
var (
|
||||
tickChan <-chan time.Time
|
||||
eventChan <-chan fsnotify.Event
|
||||
@ -106,14 +106,14 @@ func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string)
|
||||
if w, err := fsnotify.NewWatcher(); err != nil {
|
||||
// Creating a new fsnotify watcher would fail for example if inotify was not able to create a new file descriptor.
|
||||
// See https://github.com/tailscale/tailscale/issues/15081
|
||||
l.logger.Infof("Failed to create fsnotify watcher on config file %q; watching for changes on 5s timer: %v", path, err)
|
||||
ld.logger.Infof("Failed to create fsnotify watcher on config file %q; watching for changes on 5s timer: %v", path, err)
|
||||
ticker := time.NewTicker(5 * time.Second)
|
||||
defer ticker.Stop()
|
||||
tickChan = ticker.C
|
||||
} else {
|
||||
dir := filepath.Dir(path)
|
||||
file := filepath.Base(path)
|
||||
l.logger.Infof("Watching directory %q for changes to config file %q", dir, file)
|
||||
ld.logger.Infof("Watching directory %q for changes to config file %q", dir, file)
|
||||
defer w.Close()
|
||||
if err := w.Add(dir); err != nil {
|
||||
return fmt.Errorf("failed to add fsnotify watch: %w", err)
|
||||
@ -128,7 +128,7 @@ func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading config file %q: %w", path, err)
|
||||
}
|
||||
if err := l.reloadConfig(ctx, b); err != nil {
|
||||
if err := ld.reloadConfig(ctx, b); err != nil {
|
||||
return fmt.Errorf("error loading initial config file %q: %w", path, err)
|
||||
}
|
||||
|
||||
@ -163,14 +163,14 @@ func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string)
|
||||
if len(b) == 0 {
|
||||
continue
|
||||
}
|
||||
if err := l.reloadConfig(ctx, b); err != nil {
|
||||
if err := ld.reloadConfig(ctx, b); err != nil {
|
||||
return fmt.Errorf("error reloading config file %q: %v", path, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNamespace, secretName string) error {
|
||||
secrets := l.client.Secrets(secretNamespace)
|
||||
func (ld *configLoader) watchConfigSecretChanges(ctx context.Context, secretNamespace, secretName string) error {
|
||||
secrets := ld.client.Secrets(secretNamespace)
|
||||
w, err := secrets.Watch(ctx, metav1.ListOptions{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
@ -198,11 +198,11 @@ func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNames
|
||||
return fmt.Errorf("failed to get config Secret %q: %w", secretName, err)
|
||||
}
|
||||
|
||||
if err := l.configFromSecret(ctx, secret); err != nil {
|
||||
if err := ld.configFromSecret(ctx, secret); err != nil {
|
||||
return fmt.Errorf("error loading initial config: %w", err)
|
||||
}
|
||||
|
||||
l.logger.Infof("Watching config Secret %q for changes", secretName)
|
||||
ld.logger.Infof("Watching config Secret %q for changes", secretName)
|
||||
for {
|
||||
var secret *corev1.Secret
|
||||
select {
|
||||
@ -237,7 +237,7 @@ func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNames
|
||||
if secret == nil || secret.Data == nil {
|
||||
continue
|
||||
}
|
||||
if err := l.configFromSecret(ctx, secret); err != nil {
|
||||
if err := ld.configFromSecret(ctx, secret); err != nil {
|
||||
return fmt.Errorf("error reloading config Secret %q: %v", secret.Name, err)
|
||||
}
|
||||
case watch.Error:
|
||||
@ -250,13 +250,13 @@ func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNames
|
||||
}
|
||||
}
|
||||
|
||||
func (l *configLoader) configFromSecret(ctx context.Context, s *corev1.Secret) error {
|
||||
func (ld *configLoader) configFromSecret(ctx context.Context, s *corev1.Secret) error {
|
||||
b := s.Data[kubetypes.KubeAPIServerConfigFile]
|
||||
if len(b) == 0 {
|
||||
return fmt.Errorf("config Secret %q does not contain expected config in key %q", s.Name, kubetypes.KubeAPIServerConfigFile)
|
||||
}
|
||||
|
||||
if err := l.reloadConfig(ctx, b); err != nil {
|
||||
if err := ld.reloadConfig(ctx, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@ -125,15 +125,15 @@ func TestWatchConfig(t *testing.T) {
|
||||
}
|
||||
}
|
||||
configChan := make(chan *conf.Config)
|
||||
l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan)
|
||||
l.cfgIgnored = make(chan struct{})
|
||||
loader := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan)
|
||||
loader.cfgIgnored = make(chan struct{})
|
||||
errs := make(chan error)
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
defer cancel()
|
||||
|
||||
writeFile(t, tc.initialConfig)
|
||||
go func() {
|
||||
errs <- l.WatchConfig(ctx, cfgPath)
|
||||
errs <- loader.WatchConfig(ctx, cfgPath)
|
||||
}()
|
||||
|
||||
for i, p := range tc.phases {
|
||||
@ -159,7 +159,7 @@ func TestWatchConfig(t *testing.T) {
|
||||
} else if !strings.Contains(err.Error(), p.expectedErr) {
|
||||
t.Fatalf("expected error to contain %q, got %q", p.expectedErr, err.Error())
|
||||
}
|
||||
case <-l.cfgIgnored:
|
||||
case <-loader.cfgIgnored:
|
||||
if p.expectedConf != nil {
|
||||
t.Fatalf("expected config to be reloaded, but got ignored signal")
|
||||
}
|
||||
@ -192,13 +192,13 @@ func TestWatchConfigSecret_Rewatches(t *testing.T) {
|
||||
})
|
||||
|
||||
configChan := make(chan *conf.Config)
|
||||
l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan)
|
||||
loader := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan)
|
||||
|
||||
mustCreateOrUpdate(t, cl, secretFrom(expected[0]))
|
||||
|
||||
errs := make(chan error)
|
||||
go func() {
|
||||
errs <- l.watchConfigSecretChanges(t.Context(), "default", "config-secret")
|
||||
errs <- loader.watchConfigSecretChanges(t.Context(), "default", "config-secret")
|
||||
}()
|
||||
|
||||
for i := range 2 {
|
||||
@ -212,7 +212,7 @@ func TestWatchConfigSecret_Rewatches(t *testing.T) {
|
||||
}
|
||||
case err := <-errs:
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
case <-l.cfgIgnored:
|
||||
case <-loader.cfgIgnored:
|
||||
t.Fatalf("expected config to be reloaded, but got ignored signal")
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatalf("timed out waiting for expected event")
|
||||
|
||||
@ -422,9 +422,9 @@ func (ipp *ConsensusIPPool) applyCheckoutAddr(nid tailcfg.NodeID, domain string,
|
||||
}
|
||||
|
||||
// Apply is part of the raft.FSM interface. It takes an incoming log entry and applies it to the state.
|
||||
func (ipp *ConsensusIPPool) Apply(l *raft.Log) any {
|
||||
func (ipp *ConsensusIPPool) Apply(lg *raft.Log) any {
|
||||
var c tsconsensus.Command
|
||||
if err := json.Unmarshal(l.Data, &c); err != nil {
|
||||
if err := json.Unmarshal(lg.Data, &c); err != nil {
|
||||
panic(fmt.Sprintf("failed to unmarshal command: %s", err.Error()))
|
||||
}
|
||||
switch c.Name {
|
||||
|
||||
@ -156,13 +156,13 @@ func TestSNIProxyWithNetmapConfig(t *testing.T) {
|
||||
client, _, _ := startNode(t, ctx, controlURL, "client")
|
||||
|
||||
// Make sure that the sni node has received its config.
|
||||
l, err := sni.LocalClient()
|
||||
lc, err := sni.LocalClient()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
gotConfigured := false
|
||||
for range 100 {
|
||||
s, err := l.StatusWithoutPeers(ctx)
|
||||
s, err := lc.StatusWithoutPeers(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@ -135,18 +135,18 @@ type lportsPool struct {
|
||||
ports []int
|
||||
}
|
||||
|
||||
func (l *lportsPool) get() int {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
ret := l.ports[0]
|
||||
l.ports = append(l.ports[:0], l.ports[1:]...)
|
||||
func (pl *lportsPool) get() int {
|
||||
pl.Lock()
|
||||
defer pl.Unlock()
|
||||
ret := pl.ports[0]
|
||||
pl.ports = append(pl.ports[:0], pl.ports[1:]...)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (l *lportsPool) put(i int) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
l.ports = append(l.ports, int(i))
|
||||
func (pl *lportsPool) put(i int) {
|
||||
pl.Lock()
|
||||
defer pl.Unlock()
|
||||
pl.ports = append(pl.ports, int(i))
|
||||
}
|
||||
|
||||
var (
|
||||
@ -173,19 +173,19 @@ func init() {
|
||||
// measure dial time.
|
||||
type lportForTCPConn int
|
||||
|
||||
func (l *lportForTCPConn) Close() error {
|
||||
if *l == 0 {
|
||||
func (lp *lportForTCPConn) Close() error {
|
||||
if *lp == 0 {
|
||||
return nil
|
||||
}
|
||||
lports.put(int(*l))
|
||||
lports.put(int(*lp))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *lportForTCPConn) Write([]byte) (int, error) {
|
||||
func (lp *lportForTCPConn) Write([]byte) (int, error) {
|
||||
return 0, errors.New("unimplemented")
|
||||
}
|
||||
|
||||
func (l *lportForTCPConn) Read([]byte) (int, error) {
|
||||
func (lp *lportForTCPConn) Read([]byte) (int, error) {
|
||||
return 0, errors.New("unimplemented")
|
||||
}
|
||||
|
||||
|
||||
@ -65,9 +65,9 @@ func main() {
|
||||
}
|
||||
|
||||
add, remove := diffTags(stags, dtags)
|
||||
if l := len(add); l > 0 {
|
||||
if ln := len(add); ln > 0 {
|
||||
log.Printf("%d tags to push: %s", len(add), strings.Join(add, ", "))
|
||||
if *max > 0 && l > *max {
|
||||
if *max > 0 && ln > *max {
|
||||
log.Printf("Limiting sync to %d tags", *max)
|
||||
add = add[:*max]
|
||||
}
|
||||
|
||||
@ -75,8 +75,8 @@ func peerInfo(peer *ipnstate.TKAPeer) string {
|
||||
|
||||
// print prints a message about a node key signature and a re-signing command if needed.
|
||||
func print(info string, nodeKey key.NodePublic, sig tka.NodeKeySignature) {
|
||||
if l := chainLength(sig); l > *maxRotations {
|
||||
log.Printf("%s: chain length %d, printing command to re-sign", info, l)
|
||||
if ln := chainLength(sig); ln > *maxRotations {
|
||||
log.Printf("%s: chain length %d, printing command to re-sign", info, ln)
|
||||
wrapping, _ := sig.UnverifiedWrappingPublic()
|
||||
fmt.Printf("tailscale lock sign %s %s\n", nodeKey, key.NLPublicFromEd25519Unsafe(wrapping).CLIString())
|
||||
} else {
|
||||
|
||||
@ -25,12 +25,12 @@ func newConnListener() *connListener {
|
||||
}
|
||||
}
|
||||
|
||||
func (l *connListener) Accept() (net.Conn, error) {
|
||||
func (ln *connListener) Accept() (net.Conn, error) {
|
||||
select {
|
||||
case <-l.closedCh:
|
||||
case <-ln.closedCh:
|
||||
// TODO(oxtoacart): make this error match what a regular net.Listener does
|
||||
return nil, syscall.EINVAL
|
||||
case conn := <-l.ch:
|
||||
case conn := <-ln.ch:
|
||||
return conn, nil
|
||||
}
|
||||
}
|
||||
@ -38,32 +38,32 @@ func (l *connListener) Accept() (net.Conn, error) {
|
||||
// Addr implements net.Listener. This always returns nil. It is assumed that
|
||||
// this method is currently unused, so it logs a warning if it ever does get
|
||||
// called.
|
||||
func (l *connListener) Addr() net.Addr {
|
||||
func (ln *connListener) Addr() net.Addr {
|
||||
log.Println("warning: unexpected call to connListener.Addr()")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *connListener) Close() error {
|
||||
l.closeMu.Lock()
|
||||
defer l.closeMu.Unlock()
|
||||
func (ln *connListener) Close() error {
|
||||
ln.closeMu.Lock()
|
||||
defer ln.closeMu.Unlock()
|
||||
|
||||
select {
|
||||
case <-l.closedCh:
|
||||
case <-ln.closedCh:
|
||||
// Already closed.
|
||||
return syscall.EINVAL
|
||||
default:
|
||||
// We don't close l.ch because someone maybe trying to send to that,
|
||||
// which would cause a panic.
|
||||
close(l.closedCh)
|
||||
close(ln.closedCh)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (l *connListener) HandleConn(c net.Conn, remoteAddr net.Addr) error {
|
||||
func (ln *connListener) HandleConn(c net.Conn, remoteAddr net.Addr) error {
|
||||
select {
|
||||
case <-l.closedCh:
|
||||
case <-ln.closedCh:
|
||||
return syscall.EINVAL
|
||||
case l.ch <- &connWithRemoteAddr{Conn: c, remoteAddr: remoteAddr}:
|
||||
case ln.ch <- &connWithRemoteAddr{Conn: c, remoteAddr: remoteAddr}:
|
||||
// Connection has been accepted.
|
||||
}
|
||||
return nil
|
||||
|
||||
@ -10,20 +10,20 @@ import (
|
||||
)
|
||||
|
||||
func TestConnListener(t *testing.T) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:")
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to Listen: %s", err)
|
||||
}
|
||||
|
||||
cl := newConnListener()
|
||||
// Test that we can accept a connection
|
||||
cc, err := net.Dial("tcp", l.Addr().String())
|
||||
cc, err := net.Dial("tcp", ln.Addr().String())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to Dial: %s", err)
|
||||
}
|
||||
defer cc.Close()
|
||||
|
||||
sc, err := l.Accept()
|
||||
sc, err := ln.Accept()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to Accept: %s", err)
|
||||
}
|
||||
|
||||
@ -467,14 +467,14 @@ func newSystem(t *testing.T) *system {
|
||||
tstest.ResourceCheck(t)
|
||||
|
||||
fs := newFileSystemForLocal(log.Printf, nil)
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to Listen: %s", err)
|
||||
}
|
||||
t.Logf("FileSystemForLocal listening at %s", l.Addr())
|
||||
t.Logf("FileSystemForLocal listening at %s", ln.Addr())
|
||||
go func() {
|
||||
for {
|
||||
conn, err := l.Accept()
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
t.Logf("Accept: %v", err)
|
||||
return
|
||||
@ -483,11 +483,11 @@ func newSystem(t *testing.T) *system {
|
||||
}
|
||||
}()
|
||||
|
||||
client := gowebdav.NewAuthClient(fmt.Sprintf("http://%s", l.Addr()), &noopAuthorizer{})
|
||||
client := gowebdav.NewAuthClient(fmt.Sprintf("http://%s", ln.Addr()), &noopAuthorizer{})
|
||||
client.SetTransport(&http.Transport{DisableKeepAlives: true})
|
||||
s := &system{
|
||||
t: t,
|
||||
local: &local{l: l, fs: fs},
|
||||
local: &local{l: ln, fs: fs},
|
||||
client: client,
|
||||
remotes: make(map[string]*remote),
|
||||
}
|
||||
@ -496,11 +496,11 @@ func newSystem(t *testing.T) *system {
|
||||
}
|
||||
|
||||
func (s *system) addRemote(name string) string {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
s.t.Fatalf("failed to Listen: %s", err)
|
||||
}
|
||||
s.t.Logf("Remote for %v listening at %s", name, l.Addr())
|
||||
s.t.Logf("Remote for %v listening at %s", name, ln.Addr())
|
||||
|
||||
fileServer, err := NewFileServer()
|
||||
if err != nil {
|
||||
@ -510,14 +510,14 @@ func (s *system) addRemote(name string) string {
|
||||
s.t.Logf("FileServer for %v listening at %s", name, fileServer.Addr())
|
||||
|
||||
r := &remote{
|
||||
l: l,
|
||||
l: ln,
|
||||
fileServer: fileServer,
|
||||
fs: NewFileSystemForRemote(log.Printf),
|
||||
shares: make(map[string]string),
|
||||
permissions: make(map[string]drive.Permission),
|
||||
}
|
||||
r.fs.SetFileServerAddr(fileServer.Addr())
|
||||
go http.Serve(l, r)
|
||||
go http.Serve(ln, r)
|
||||
s.remotes[name] = r
|
||||
|
||||
remotes := make([]*drive.Remote, 0, len(s.remotes))
|
||||
|
||||
@ -20,7 +20,7 @@ import (
|
||||
// It's typically used in a separate process from the actual Taildrive server to
|
||||
// serve up files as an unprivileged user.
|
||||
type FileServer struct {
|
||||
l net.Listener
|
||||
ln net.Listener
|
||||
secretToken string
|
||||
shareHandlers map[string]http.Handler
|
||||
sharesMu sync.RWMutex
|
||||
@ -41,10 +41,10 @@ type FileServer struct {
|
||||
// called.
|
||||
func NewFileServer() (*FileServer, error) {
|
||||
// path := filepath.Join(os.TempDir(), fmt.Sprintf("%v.socket", uuid.New().String()))
|
||||
// l, err := safesocket.Listen(path)
|
||||
// ln, err := safesocket.Listen(path)
|
||||
// if err != nil {
|
||||
// TODO(oxtoacart): actually get safesocket working in more environments (MacOS Sandboxed, Windows, ???)
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("listen: %w", err)
|
||||
}
|
||||
@ -55,7 +55,7 @@ func NewFileServer() (*FileServer, error) {
|
||||
}
|
||||
|
||||
return &FileServer{
|
||||
l: l,
|
||||
ln: ln,
|
||||
secretToken: secretToken,
|
||||
shareHandlers: make(map[string]http.Handler),
|
||||
}, nil
|
||||
@ -74,12 +74,12 @@ func generateSecretToken() (string, error) {
|
||||
// Addr returns the address at which this FileServer is listening. This
|
||||
// includes the secret token in front of the address, delimited by a pipe |.
|
||||
func (s *FileServer) Addr() string {
|
||||
return fmt.Sprintf("%s|%s", s.secretToken, s.l.Addr().String())
|
||||
return fmt.Sprintf("%s|%s", s.secretToken, s.ln.Addr().String())
|
||||
}
|
||||
|
||||
// Serve() starts serving files and blocks until it encounters a fatal error.
|
||||
func (s *FileServer) Serve() error {
|
||||
return http.Serve(s.l, s)
|
||||
return http.Serve(s.ln, s)
|
||||
}
|
||||
|
||||
// LockShares locks the map of shares in preparation for manipulating it.
|
||||
@ -162,5 +162,5 @@ func (s *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func (s *FileServer) Close() error {
|
||||
return s.l.Close()
|
||||
return s.ln.Close()
|
||||
}
|
||||
|
||||
@ -29,8 +29,8 @@ type logOnce struct {
|
||||
sync.Once
|
||||
}
|
||||
|
||||
func (l *logOnce) logf(format string, args ...any) {
|
||||
l.Once.Do(func() {
|
||||
func (lg *logOnce) logf(format string, args ...any) {
|
||||
lg.Once.Do(func() {
|
||||
log.Printf(format, args...)
|
||||
})
|
||||
}
|
||||
|
||||
@ -266,12 +266,12 @@ func (h *Handler) serveTKALog(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
limit := 50
|
||||
if limitStr := r.FormValue("limit"); limitStr != "" {
|
||||
l, err := strconv.Atoi(limitStr)
|
||||
lm, err := strconv.Atoi(limitStr)
|
||||
if err != nil {
|
||||
http.Error(w, "parsing 'limit' parameter: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
limit = int(l)
|
||||
limit = int(lm)
|
||||
}
|
||||
|
||||
updates, err := h.b.NetworkLockLog(limit)
|
||||
|
||||
@ -352,12 +352,12 @@ type ServiceMonitor struct {
|
||||
|
||||
type Labels map[string]LabelValue
|
||||
|
||||
func (l Labels) Parse() map[string]string {
|
||||
if l == nil {
|
||||
func (lb Labels) Parse() map[string]string {
|
||||
if lb == nil {
|
||||
return nil
|
||||
}
|
||||
m := make(map[string]string, len(l))
|
||||
for k, v := range l {
|
||||
m := make(map[string]string, len(lb))
|
||||
for k, v := range lb {
|
||||
m[k] = string(v)
|
||||
}
|
||||
return m
|
||||
|
||||
@ -99,7 +99,7 @@ func Test_conn_Read(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
l := zl.Sugar()
|
||||
log := zl.Sugar()
|
||||
tc := &fakes.TestConn{}
|
||||
sr := &fakes.TestSessionRecorder{}
|
||||
rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar())
|
||||
@ -110,7 +110,7 @@ func Test_conn_Read(t *testing.T) {
|
||||
c := &conn{
|
||||
ctx: ctx,
|
||||
Conn: tc,
|
||||
log: l,
|
||||
log: log,
|
||||
hasTerm: true,
|
||||
initialCastHeaderSent: make(chan struct{}),
|
||||
rec: rec,
|
||||
|
||||
@ -69,12 +69,12 @@ var _ json.Unmarshaler = &PortMaps{}
|
||||
func (p *PortMaps) UnmarshalJSON(data []byte) error {
|
||||
*p = make(map[PortMap]struct{})
|
||||
|
||||
var l []PortMap
|
||||
if err := json.Unmarshal(data, &l); err != nil {
|
||||
var v []PortMap
|
||||
if err := json.Unmarshal(data, &v); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, pm := range l {
|
||||
for _, pm := range v {
|
||||
(*p)[pm] = struct{}{}
|
||||
}
|
||||
|
||||
@ -82,12 +82,12 @@ func (p *PortMaps) UnmarshalJSON(data []byte) error {
|
||||
}
|
||||
|
||||
func (p PortMaps) MarshalJSON() ([]byte, error) {
|
||||
l := make([]PortMap, 0, len(p))
|
||||
v := make([]PortMap, 0, len(p))
|
||||
for pm := range p {
|
||||
l = append(l, pm)
|
||||
v = append(v, pm)
|
||||
}
|
||||
|
||||
return json.Marshal(l)
|
||||
return json.Marshal(v)
|
||||
}
|
||||
|
||||
// Status represents the currently configured firewall rules for all egress
|
||||
|
||||
@ -40,10 +40,10 @@ type localClient struct {
|
||||
lc *local.Client
|
||||
}
|
||||
|
||||
func (l *localClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) {
|
||||
return l.lc.WatchIPNBus(ctx, mask)
|
||||
func (lc *localClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) {
|
||||
return lc.lc.WatchIPNBus(ctx, mask)
|
||||
}
|
||||
|
||||
func (l *localClient) CertPair(ctx context.Context, domain string) ([]byte, []byte, error) {
|
||||
return l.lc.CertPair(ctx, domain)
|
||||
func (lc *localClient) CertPair(ctx context.Context, domain string) ([]byte, []byte, error) {
|
||||
return lc.lc.CertPair(ctx, domain)
|
||||
}
|
||||
|
||||
@ -146,33 +146,33 @@ func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *ne
|
||||
// SetLoggingEnabled enables or disables logging.
|
||||
// When disabled, socket stats are not polled and no new logs are written to disk.
|
||||
// Existing logs can still be fetched via the C2N API.
|
||||
func (l *Logger) SetLoggingEnabled(v bool) {
|
||||
old := l.enabled.Load()
|
||||
if old != v && l.enabled.CompareAndSwap(old, v) {
|
||||
func (lg *Logger) SetLoggingEnabled(v bool) {
|
||||
old := lg.enabled.Load()
|
||||
if old != v && lg.enabled.CompareAndSwap(old, v) {
|
||||
if v {
|
||||
if l.eventCh == nil {
|
||||
if lg.eventCh == nil {
|
||||
// eventCh should be large enough for the number of events that will occur within logInterval.
|
||||
// Add an extra second's worth of events to ensure we don't drop any.
|
||||
l.eventCh = make(chan event, (logInterval+time.Second)/pollInterval)
|
||||
lg.eventCh = make(chan event, (logInterval+time.Second)/pollInterval)
|
||||
}
|
||||
l.ctx, l.cancelFn = context.WithCancel(context.Background())
|
||||
go l.poll()
|
||||
go l.logEvents()
|
||||
lg.ctx, lg.cancelFn = context.WithCancel(context.Background())
|
||||
go lg.poll()
|
||||
go lg.logEvents()
|
||||
} else {
|
||||
l.cancelFn()
|
||||
lg.cancelFn()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *Logger) Write(p []byte) (int, error) {
|
||||
return l.logger.Write(p)
|
||||
func (lg *Logger) Write(p []byte) (int, error) {
|
||||
return lg.logger.Write(p)
|
||||
}
|
||||
|
||||
// poll fetches the current socket stats at the configured time interval,
|
||||
// calculates the delta since the last poll,
|
||||
// and writes any non-zero values to the logger event channel.
|
||||
// This method does not return.
|
||||
func (l *Logger) poll() {
|
||||
func (lg *Logger) poll() {
|
||||
// last is the last set of socket stats we saw.
|
||||
var lastStats *sockstats.SockStats
|
||||
var lastTime time.Time
|
||||
@ -180,7 +180,7 @@ func (l *Logger) poll() {
|
||||
ticker := time.NewTicker(pollInterval)
|
||||
for {
|
||||
select {
|
||||
case <-l.ctx.Done():
|
||||
case <-lg.ctx.Done():
|
||||
ticker.Stop()
|
||||
return
|
||||
case t := <-ticker.C:
|
||||
@ -196,7 +196,7 @@ func (l *Logger) poll() {
|
||||
if stats.CurrentInterfaceCellular {
|
||||
e.IsCellularInterface = 1
|
||||
}
|
||||
l.eventCh <- e
|
||||
lg.eventCh <- e
|
||||
}
|
||||
}
|
||||
lastTime = t
|
||||
@ -207,14 +207,14 @@ func (l *Logger) poll() {
|
||||
|
||||
// logEvents reads events from the event channel at logInterval and logs them to disk.
|
||||
// This method does not return.
|
||||
func (l *Logger) logEvents() {
|
||||
enc := json.NewEncoder(l)
|
||||
func (lg *Logger) logEvents() {
|
||||
enc := json.NewEncoder(lg)
|
||||
flush := func() {
|
||||
for {
|
||||
select {
|
||||
case e := <-l.eventCh:
|
||||
case e := <-lg.eventCh:
|
||||
if err := enc.Encode(e); err != nil {
|
||||
l.logf("sockstatlog: error encoding log: %v", err)
|
||||
lg.logf("sockstatlog: error encoding log: %v", err)
|
||||
}
|
||||
default:
|
||||
return
|
||||
@ -224,7 +224,7 @@ func (l *Logger) logEvents() {
|
||||
ticker := time.NewTicker(logInterval)
|
||||
for {
|
||||
select {
|
||||
case <-l.ctx.Done():
|
||||
case <-lg.ctx.Done():
|
||||
ticker.Stop()
|
||||
return
|
||||
case <-ticker.C:
|
||||
@ -233,29 +233,29 @@ func (l *Logger) logEvents() {
|
||||
}
|
||||
}
|
||||
|
||||
func (l *Logger) LogID() string {
|
||||
if l.logger == nil {
|
||||
func (lg *Logger) LogID() string {
|
||||
if lg.logger == nil {
|
||||
return ""
|
||||
}
|
||||
return l.logger.PrivateID().Public().String()
|
||||
return lg.logger.PrivateID().Public().String()
|
||||
}
|
||||
|
||||
// Flush sends pending logs to the log server and flushes them from the local buffer.
|
||||
func (l *Logger) Flush() {
|
||||
l.logger.StartFlush()
|
||||
func (lg *Logger) Flush() {
|
||||
lg.logger.StartFlush()
|
||||
}
|
||||
|
||||
func (l *Logger) Shutdown(ctx context.Context) {
|
||||
if l.cancelFn != nil {
|
||||
l.cancelFn()
|
||||
func (lg *Logger) Shutdown(ctx context.Context) {
|
||||
if lg.cancelFn != nil {
|
||||
lg.cancelFn()
|
||||
}
|
||||
l.filch.Close()
|
||||
l.logger.Shutdown(ctx)
|
||||
lg.filch.Close()
|
||||
lg.logger.Shutdown(ctx)
|
||||
|
||||
type closeIdler interface {
|
||||
CloseIdleConnections()
|
||||
}
|
||||
if tr, ok := l.tr.(closeIdler); ok {
|
||||
if tr, ok := lg.tr.(closeIdler); ok {
|
||||
tr.CloseIdleConnections()
|
||||
}
|
||||
}
|
||||
|
||||
@ -193,8 +193,8 @@ type logWriter struct {
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
func (l logWriter) Write(buf []byte) (int, error) {
|
||||
l.logger.Printf("%s", buf)
|
||||
func (lg logWriter) Write(buf []byte) (int, error) {
|
||||
lg.logger.Printf("%s", buf)
|
||||
return len(buf), nil
|
||||
}
|
||||
|
||||
|
||||
@ -100,7 +100,7 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger {
|
||||
if !cfg.CopyPrivateID.IsZero() {
|
||||
urlSuffix = "?copyId=" + cfg.CopyPrivateID.String()
|
||||
}
|
||||
l := &Logger{
|
||||
logger := &Logger{
|
||||
privateID: cfg.PrivateID,
|
||||
stderr: cfg.Stderr,
|
||||
stderrLevel: int64(cfg.StderrLevel),
|
||||
@ -124,19 +124,19 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger {
|
||||
}
|
||||
|
||||
if cfg.Bus != nil {
|
||||
l.eventClient = cfg.Bus.Client("logtail.Logger")
|
||||
logger.eventClient = cfg.Bus.Client("logtail.Logger")
|
||||
// Subscribe to change deltas from NetMon to detect when the network comes up.
|
||||
eventbus.SubscribeFunc(l.eventClient, l.onChangeDelta)
|
||||
eventbus.SubscribeFunc(logger.eventClient, logger.onChangeDelta)
|
||||
}
|
||||
l.SetSockstatsLabel(sockstats.LabelLogtailLogger)
|
||||
l.compressLogs = cfg.CompressLogs
|
||||
logger.SetSockstatsLabel(sockstats.LabelLogtailLogger)
|
||||
logger.compressLogs = cfg.CompressLogs
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
l.uploadCancel = cancel
|
||||
logger.uploadCancel = cancel
|
||||
|
||||
go l.uploading(ctx)
|
||||
l.Write([]byte("logtail started"))
|
||||
return l
|
||||
go logger.uploading(ctx)
|
||||
logger.Write([]byte("logtail started"))
|
||||
return logger
|
||||
}
|
||||
|
||||
// Logger writes logs, splitting them as configured between local
|
||||
@ -190,27 +190,27 @@ func (p *atomicSocktatsLabel) Store(label sockstats.Label) { p.p.Store(uint32(la
|
||||
// SetVerbosityLevel controls the verbosity level that should be
|
||||
// written to stderr. 0 is the default (not verbose). Levels 1 or higher
|
||||
// are increasingly verbose.
|
||||
func (l *Logger) SetVerbosityLevel(level int) {
|
||||
atomic.StoreInt64(&l.stderrLevel, int64(level))
|
||||
func (lg *Logger) SetVerbosityLevel(level int) {
|
||||
atomic.StoreInt64(&lg.stderrLevel, int64(level))
|
||||
}
|
||||
|
||||
// SetNetMon sets the network monitor.
|
||||
//
|
||||
// It should not be changed concurrently with log writes and should
|
||||
// only be set once.
|
||||
func (l *Logger) SetNetMon(lm *netmon.Monitor) {
|
||||
l.netMonitor = lm
|
||||
func (lg *Logger) SetNetMon(lm *netmon.Monitor) {
|
||||
lg.netMonitor = lm
|
||||
}
|
||||
|
||||
// SetSockstatsLabel sets the label used in sockstat logs to identify network traffic from this logger.
|
||||
func (l *Logger) SetSockstatsLabel(label sockstats.Label) {
|
||||
l.sockstatsLabel.Store(label)
|
||||
func (lg *Logger) SetSockstatsLabel(label sockstats.Label) {
|
||||
lg.sockstatsLabel.Store(label)
|
||||
}
|
||||
|
||||
// PrivateID returns the logger's private log ID.
|
||||
//
|
||||
// It exists for internal use only.
|
||||
func (l *Logger) PrivateID() logid.PrivateID { return l.privateID }
|
||||
func (lg *Logger) PrivateID() logid.PrivateID { return lg.privateID }
|
||||
|
||||
// Shutdown gracefully shuts down the logger while completing any
|
||||
// remaining uploads.
|
||||
@ -218,33 +218,33 @@ func (l *Logger) PrivateID() logid.PrivateID { return l.privateID }
|
||||
// It will block, continuing to try and upload unless the passed
|
||||
// context object interrupts it by being done.
|
||||
// If the shutdown is interrupted, an error is returned.
|
||||
func (l *Logger) Shutdown(ctx context.Context) error {
|
||||
func (lg *Logger) Shutdown(ctx context.Context) error {
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
l.uploadCancel()
|
||||
<-l.shutdownDone
|
||||
case <-l.shutdownDone:
|
||||
lg.uploadCancel()
|
||||
<-lg.shutdownDone
|
||||
case <-lg.shutdownDone:
|
||||
}
|
||||
close(done)
|
||||
l.httpc.CloseIdleConnections()
|
||||
lg.httpc.CloseIdleConnections()
|
||||
}()
|
||||
|
||||
if l.eventClient != nil {
|
||||
l.eventClient.Close()
|
||||
if lg.eventClient != nil {
|
||||
lg.eventClient.Close()
|
||||
}
|
||||
l.shutdownStartMu.Lock()
|
||||
lg.shutdownStartMu.Lock()
|
||||
select {
|
||||
case <-l.shutdownStart:
|
||||
l.shutdownStartMu.Unlock()
|
||||
case <-lg.shutdownStart:
|
||||
lg.shutdownStartMu.Unlock()
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
close(l.shutdownStart)
|
||||
l.shutdownStartMu.Unlock()
|
||||
close(lg.shutdownStart)
|
||||
lg.shutdownStartMu.Unlock()
|
||||
|
||||
io.WriteString(l, "logger closing down\n")
|
||||
io.WriteString(lg, "logger closing down\n")
|
||||
<-done
|
||||
|
||||
return nil
|
||||
@ -254,8 +254,8 @@ func (l *Logger) Shutdown(ctx context.Context) error {
|
||||
// process, and any associated goroutines.
|
||||
//
|
||||
// Deprecated: use Shutdown
|
||||
func (l *Logger) Close() {
|
||||
l.Shutdown(context.Background())
|
||||
func (lg *Logger) Close() {
|
||||
lg.Shutdown(context.Background())
|
||||
}
|
||||
|
||||
// drainBlock is called by drainPending when there are no logs to drain.
|
||||
@ -265,11 +265,11 @@ func (l *Logger) Close() {
|
||||
//
|
||||
// If the caller specified FlushInterface, drainWake is only sent to
|
||||
// periodically.
|
||||
func (l *Logger) drainBlock() (shuttingDown bool) {
|
||||
func (lg *Logger) drainBlock() (shuttingDown bool) {
|
||||
select {
|
||||
case <-l.shutdownStart:
|
||||
case <-lg.shutdownStart:
|
||||
return true
|
||||
case <-l.drainWake:
|
||||
case <-lg.drainWake:
|
||||
}
|
||||
return false
|
||||
}
|
||||
@ -277,20 +277,20 @@ func (l *Logger) drainBlock() (shuttingDown bool) {
|
||||
// drainPending drains and encodes a batch of logs from the buffer for upload.
|
||||
// If no logs are available, drainPending blocks until logs are available.
|
||||
// The returned buffer is only valid until the next call to drainPending.
|
||||
func (l *Logger) drainPending() (b []byte) {
|
||||
b = l.drainBuf[:0]
|
||||
func (lg *Logger) drainPending() (b []byte) {
|
||||
b = lg.drainBuf[:0]
|
||||
b = append(b, '[')
|
||||
defer func() {
|
||||
b = bytes.TrimRight(b, ",")
|
||||
b = append(b, ']')
|
||||
l.drainBuf = b
|
||||
lg.drainBuf = b
|
||||
if len(b) <= len("[]") {
|
||||
b = nil
|
||||
}
|
||||
}()
|
||||
|
||||
maxLen := cmp.Or(l.maxUploadSize, maxSize)
|
||||
if l.lowMem {
|
||||
maxLen := cmp.Or(lg.maxUploadSize, maxSize)
|
||||
if lg.lowMem {
|
||||
// When operating in a low memory environment, it is better to upload
|
||||
// in multiple operations than it is to allocate a large body and OOM.
|
||||
// Even if maxLen is less than maxSize, we can still upload an entry
|
||||
@ -298,13 +298,13 @@ func (l *Logger) drainPending() (b []byte) {
|
||||
maxLen /= lowMemRatio
|
||||
}
|
||||
for len(b) < maxLen {
|
||||
line, err := l.buffer.TryReadLine()
|
||||
line, err := lg.buffer.TryReadLine()
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
return b
|
||||
case err != nil:
|
||||
b = append(b, '{')
|
||||
b = l.appendMetadata(b, false, true, 0, 0, "reading ringbuffer: "+err.Error(), nil, 0)
|
||||
b = lg.appendMetadata(b, false, true, 0, 0, "reading ringbuffer: "+err.Error(), nil, 0)
|
||||
b = bytes.TrimRight(b, ",")
|
||||
b = append(b, '}')
|
||||
return b
|
||||
@ -318,10 +318,10 @@ func (l *Logger) drainPending() (b []byte) {
|
||||
// in our buffer from a previous large write, let it go.
|
||||
if cap(b) > bufferSize {
|
||||
b = bytes.Clone(b)
|
||||
l.drainBuf = b
|
||||
lg.drainBuf = b
|
||||
}
|
||||
|
||||
if shuttingDown := l.drainBlock(); shuttingDown {
|
||||
if shuttingDown := lg.drainBlock(); shuttingDown {
|
||||
return b
|
||||
}
|
||||
continue
|
||||
@ -338,18 +338,18 @@ func (l *Logger) drainPending() (b []byte) {
|
||||
default:
|
||||
// This is probably a log added to stderr by filch
|
||||
// outside of the logtail logger. Encode it.
|
||||
if !l.explainedRaw {
|
||||
fmt.Fprintf(l.stderr, "RAW-STDERR: ***\n")
|
||||
fmt.Fprintf(l.stderr, "RAW-STDERR: *** Lines prefixed with RAW-STDERR below bypassed logtail and probably come from a previous run of the program\n")
|
||||
fmt.Fprintf(l.stderr, "RAW-STDERR: ***\n")
|
||||
fmt.Fprintf(l.stderr, "RAW-STDERR:\n")
|
||||
l.explainedRaw = true
|
||||
if !lg.explainedRaw {
|
||||
fmt.Fprintf(lg.stderr, "RAW-STDERR: ***\n")
|
||||
fmt.Fprintf(lg.stderr, "RAW-STDERR: *** Lines prefixed with RAW-STDERR below bypassed logtail and probably come from a previous run of the program\n")
|
||||
fmt.Fprintf(lg.stderr, "RAW-STDERR: ***\n")
|
||||
fmt.Fprintf(lg.stderr, "RAW-STDERR:\n")
|
||||
lg.explainedRaw = true
|
||||
}
|
||||
fmt.Fprintf(l.stderr, "RAW-STDERR: %s", b)
|
||||
fmt.Fprintf(lg.stderr, "RAW-STDERR: %s", b)
|
||||
// Do not add a client time, as it could be really old.
|
||||
// Do not include instance key or ID either,
|
||||
// since this came from a different instance.
|
||||
b = l.appendText(b, line, true, 0, 0, 0)
|
||||
b = lg.appendText(b, line, true, 0, 0, 0)
|
||||
}
|
||||
b = append(b, ',')
|
||||
}
|
||||
@ -357,14 +357,14 @@ func (l *Logger) drainPending() (b []byte) {
|
||||
}
|
||||
|
||||
// This is the goroutine that repeatedly uploads logs in the background.
|
||||
func (l *Logger) uploading(ctx context.Context) {
|
||||
defer close(l.shutdownDone)
|
||||
func (lg *Logger) uploading(ctx context.Context) {
|
||||
defer close(lg.shutdownDone)
|
||||
|
||||
for {
|
||||
body := l.drainPending()
|
||||
body := lg.drainPending()
|
||||
origlen := -1 // sentinel value: uncompressed
|
||||
// Don't attempt to compress tiny bodies; not worth the CPU cycles.
|
||||
if l.compressLogs && len(body) > 256 {
|
||||
if lg.compressLogs && len(body) > 256 {
|
||||
zbody := zstdframe.AppendEncode(nil, body,
|
||||
zstdframe.FastestCompression, zstdframe.LowMemory(true))
|
||||
|
||||
@ -381,20 +381,20 @@ func (l *Logger) uploading(ctx context.Context) {
|
||||
var numFailures int
|
||||
var firstFailure time.Time
|
||||
for len(body) > 0 && ctx.Err() == nil {
|
||||
retryAfter, err := l.upload(ctx, body, origlen)
|
||||
retryAfter, err := lg.upload(ctx, body, origlen)
|
||||
if err != nil {
|
||||
numFailures++
|
||||
firstFailure = l.clock.Now()
|
||||
firstFailure = lg.clock.Now()
|
||||
|
||||
if !l.internetUp() {
|
||||
fmt.Fprintf(l.stderr, "logtail: internet down; waiting\n")
|
||||
l.awaitInternetUp(ctx)
|
||||
if !lg.internetUp() {
|
||||
fmt.Fprintf(lg.stderr, "logtail: internet down; waiting\n")
|
||||
lg.awaitInternetUp(ctx)
|
||||
continue
|
||||
}
|
||||
|
||||
// Only print the same message once.
|
||||
if currError := err.Error(); lastError != currError {
|
||||
fmt.Fprintf(l.stderr, "logtail: upload: %v\n", err)
|
||||
fmt.Fprintf(lg.stderr, "logtail: upload: %v\n", err)
|
||||
lastError = currError
|
||||
}
|
||||
|
||||
@ -407,55 +407,55 @@ func (l *Logger) uploading(ctx context.Context) {
|
||||
} else {
|
||||
// Only print a success message after recovery.
|
||||
if numFailures > 0 {
|
||||
fmt.Fprintf(l.stderr, "logtail: upload succeeded after %d failures and %s\n", numFailures, l.clock.Since(firstFailure).Round(time.Second))
|
||||
fmt.Fprintf(lg.stderr, "logtail: upload succeeded after %d failures and %s\n", numFailures, lg.clock.Since(firstFailure).Round(time.Second))
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-l.shutdownStart:
|
||||
case <-lg.shutdownStart:
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *Logger) internetUp() bool {
|
||||
func (lg *Logger) internetUp() bool {
|
||||
select {
|
||||
case <-l.networkIsUp.Ready():
|
||||
case <-lg.networkIsUp.Ready():
|
||||
return true
|
||||
default:
|
||||
if l.netMonitor == nil {
|
||||
if lg.netMonitor == nil {
|
||||
return true // No way to tell, so assume it is.
|
||||
}
|
||||
return l.netMonitor.InterfaceState().AnyInterfaceUp()
|
||||
return lg.netMonitor.InterfaceState().AnyInterfaceUp()
|
||||
}
|
||||
}
|
||||
|
||||
// onChangeDelta is an eventbus subscriber function that handles
|
||||
// [netmon.ChangeDelta] events to detect whether the Internet is expected to be
|
||||
// reachable.
|
||||
func (l *Logger) onChangeDelta(delta *netmon.ChangeDelta) {
|
||||
func (lg *Logger) onChangeDelta(delta *netmon.ChangeDelta) {
|
||||
if delta.New.AnyInterfaceUp() {
|
||||
fmt.Fprintf(l.stderr, "logtail: internet back up\n")
|
||||
l.networkIsUp.Set()
|
||||
fmt.Fprintf(lg.stderr, "logtail: internet back up\n")
|
||||
lg.networkIsUp.Set()
|
||||
} else {
|
||||
fmt.Fprintf(l.stderr, "logtail: network changed, but is not up\n")
|
||||
l.networkIsUp.Reset()
|
||||
fmt.Fprintf(lg.stderr, "logtail: network changed, but is not up\n")
|
||||
lg.networkIsUp.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
func (l *Logger) awaitInternetUp(ctx context.Context) {
|
||||
if l.eventClient != nil {
|
||||
func (lg *Logger) awaitInternetUp(ctx context.Context) {
|
||||
if lg.eventClient != nil {
|
||||
select {
|
||||
case <-l.networkIsUp.Ready():
|
||||
case <-lg.networkIsUp.Ready():
|
||||
case <-ctx.Done():
|
||||
}
|
||||
return
|
||||
}
|
||||
upc := make(chan bool, 1)
|
||||
defer l.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) {
|
||||
defer lg.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) {
|
||||
if delta.New.AnyInterfaceUp() {
|
||||
select {
|
||||
case upc <- true:
|
||||
@ -463,12 +463,12 @@ func (l *Logger) awaitInternetUp(ctx context.Context) {
|
||||
}
|
||||
}
|
||||
})()
|
||||
if l.internetUp() {
|
||||
if lg.internetUp() {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-upc:
|
||||
fmt.Fprintf(l.stderr, "logtail: internet back up\n")
|
||||
fmt.Fprintf(lg.stderr, "logtail: internet back up\n")
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}
|
||||
@ -476,13 +476,13 @@ func (l *Logger) awaitInternetUp(ctx context.Context) {
|
||||
// upload uploads body to the log server.
|
||||
// origlen indicates the pre-compression body length.
|
||||
// origlen of -1 indicates that the body is not compressed.
|
||||
func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAfter time.Duration, err error) {
|
||||
func (lg *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAfter time.Duration, err error) {
|
||||
const maxUploadTime = 45 * time.Second
|
||||
ctx = sockstats.WithSockStats(ctx, l.sockstatsLabel.Load(), l.Logf)
|
||||
ctx = sockstats.WithSockStats(ctx, lg.sockstatsLabel.Load(), lg.Logf)
|
||||
ctx, cancel := context.WithTimeout(ctx, maxUploadTime)
|
||||
defer cancel()
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", l.url, bytes.NewReader(body))
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", lg.url, bytes.NewReader(body))
|
||||
if err != nil {
|
||||
// I know of no conditions under which this could fail.
|
||||
// Report it very loudly.
|
||||
@ -513,8 +513,8 @@ func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAft
|
||||
compressedNote = "compressed"
|
||||
}
|
||||
|
||||
l.httpDoCalls.Add(1)
|
||||
resp, err := l.httpc.Do(req)
|
||||
lg.httpDoCalls.Add(1)
|
||||
resp, err := lg.httpc.Do(req)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("log upload of %d bytes %s failed: %v", len(body), compressedNote, err)
|
||||
}
|
||||
@ -533,16 +533,16 @@ func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAft
|
||||
//
|
||||
// TODO(bradfitz): this apparently just returns nil, as of tailscale/corp@9c2ec35.
|
||||
// Finish cleaning this up.
|
||||
func (l *Logger) Flush() error {
|
||||
func (lg *Logger) Flush() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartFlush starts a log upload, if anything is pending.
|
||||
//
|
||||
// If l is nil, StartFlush is a no-op.
|
||||
func (l *Logger) StartFlush() {
|
||||
if l != nil {
|
||||
l.tryDrainWake()
|
||||
func (lg *Logger) StartFlush() {
|
||||
if lg != nil {
|
||||
lg.tryDrainWake()
|
||||
}
|
||||
}
|
||||
|
||||
@ -558,41 +558,41 @@ var debugWakesAndUploads = envknob.RegisterBool("TS_DEBUG_LOGTAIL_WAKES")
|
||||
|
||||
// tryDrainWake tries to send to lg.drainWake, to cause an uploading wakeup.
|
||||
// It does not block.
|
||||
func (l *Logger) tryDrainWake() {
|
||||
l.flushPending.Store(false)
|
||||
func (lg *Logger) tryDrainWake() {
|
||||
lg.flushPending.Store(false)
|
||||
if debugWakesAndUploads() {
|
||||
// Using println instead of log.Printf here to avoid recursing back into
|
||||
// ourselves.
|
||||
println("logtail: try drain wake, numHTTP:", l.httpDoCalls.Load())
|
||||
println("logtail: try drain wake, numHTTP:", lg.httpDoCalls.Load())
|
||||
}
|
||||
select {
|
||||
case l.drainWake <- struct{}{}:
|
||||
case lg.drainWake <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (l *Logger) sendLocked(jsonBlob []byte) (int, error) {
|
||||
func (lg *Logger) sendLocked(jsonBlob []byte) (int, error) {
|
||||
tapSend(jsonBlob)
|
||||
if logtailDisabled.Load() {
|
||||
return len(jsonBlob), nil
|
||||
}
|
||||
|
||||
n, err := l.buffer.Write(jsonBlob)
|
||||
n, err := lg.buffer.Write(jsonBlob)
|
||||
|
||||
flushDelay := defaultFlushDelay
|
||||
if l.flushDelayFn != nil {
|
||||
flushDelay = l.flushDelayFn()
|
||||
if lg.flushDelayFn != nil {
|
||||
flushDelay = lg.flushDelayFn()
|
||||
}
|
||||
if flushDelay > 0 {
|
||||
if l.flushPending.CompareAndSwap(false, true) {
|
||||
if l.flushTimer == nil {
|
||||
l.flushTimer = l.clock.AfterFunc(flushDelay, l.tryDrainWake)
|
||||
if lg.flushPending.CompareAndSwap(false, true) {
|
||||
if lg.flushTimer == nil {
|
||||
lg.flushTimer = lg.clock.AfterFunc(flushDelay, lg.tryDrainWake)
|
||||
} else {
|
||||
l.flushTimer.Reset(flushDelay)
|
||||
lg.flushTimer.Reset(flushDelay)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
l.tryDrainWake()
|
||||
lg.tryDrainWake()
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
@ -600,13 +600,13 @@ func (l *Logger) sendLocked(jsonBlob []byte) (int, error) {
|
||||
// appendMetadata appends optional "logtail", "metrics", and "v" JSON members.
|
||||
// This assumes dst is already within a JSON object.
|
||||
// Each member is comma-terminated.
|
||||
func (l *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, procID uint32, procSequence uint64, errDetail string, errData jsontext.Value, level int) []byte {
|
||||
func (lg *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, procID uint32, procSequence uint64, errDetail string, errData jsontext.Value, level int) []byte {
|
||||
// Append optional logtail metadata.
|
||||
if !skipClientTime || procID != 0 || procSequence != 0 || errDetail != "" || errData != nil {
|
||||
dst = append(dst, `"logtail":{`...)
|
||||
if !skipClientTime {
|
||||
dst = append(dst, `"client_time":"`...)
|
||||
dst = l.clock.Now().UTC().AppendFormat(dst, time.RFC3339Nano)
|
||||
dst = lg.clock.Now().UTC().AppendFormat(dst, time.RFC3339Nano)
|
||||
dst = append(dst, '"', ',')
|
||||
}
|
||||
if procID != 0 {
|
||||
@ -639,8 +639,8 @@ func (l *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, pr
|
||||
}
|
||||
|
||||
// Append optional metrics metadata.
|
||||
if !skipMetrics && l.metricsDelta != nil {
|
||||
if d := l.metricsDelta(); d != "" {
|
||||
if !skipMetrics && lg.metricsDelta != nil {
|
||||
if d := lg.metricsDelta(); d != "" {
|
||||
dst = append(dst, `"metrics":"`...)
|
||||
dst = append(dst, d...)
|
||||
dst = append(dst, '"', ',')
|
||||
@ -660,10 +660,10 @@ func (l *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, pr
|
||||
}
|
||||
|
||||
// appendText appends a raw text message in the Tailscale JSON log entry format.
|
||||
func (l *Logger) appendText(dst, src []byte, skipClientTime bool, procID uint32, procSequence uint64, level int) []byte {
|
||||
func (lg *Logger) appendText(dst, src []byte, skipClientTime bool, procID uint32, procSequence uint64, level int) []byte {
|
||||
dst = slices.Grow(dst, len(src))
|
||||
dst = append(dst, '{')
|
||||
dst = l.appendMetadata(dst, skipClientTime, false, procID, procSequence, "", nil, level)
|
||||
dst = lg.appendMetadata(dst, skipClientTime, false, procID, procSequence, "", nil, level)
|
||||
if len(src) == 0 {
|
||||
dst = bytes.TrimRight(dst, ",")
|
||||
return append(dst, "}\n"...)
|
||||
@ -672,7 +672,7 @@ func (l *Logger) appendText(dst, src []byte, skipClientTime bool, procID uint32,
|
||||
// Append the text string, which may be truncated.
|
||||
// Invalid UTF-8 will be mangled with the Unicode replacement character.
|
||||
max := maxTextSize
|
||||
if l.lowMem {
|
||||
if lg.lowMem {
|
||||
max /= lowMemRatio
|
||||
}
|
||||
dst = append(dst, `"text":`...)
|
||||
@ -697,12 +697,12 @@ func appendTruncatedString(dst, src []byte, n int) []byte {
|
||||
|
||||
// appendTextOrJSONLocked appends a raw text message or a raw JSON object
|
||||
// in the Tailscale JSON log format.
|
||||
func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte {
|
||||
if l.includeProcSequence {
|
||||
l.procSequence++
|
||||
func (lg *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte {
|
||||
if lg.includeProcSequence {
|
||||
lg.procSequence++
|
||||
}
|
||||
if len(src) == 0 || src[0] != '{' {
|
||||
return l.appendText(dst, src, l.skipClientTime, l.procID, l.procSequence, level)
|
||||
return lg.appendText(dst, src, lg.skipClientTime, lg.procID, lg.procSequence, level)
|
||||
}
|
||||
|
||||
// Check whether the input is a valid JSON object and
|
||||
@ -714,11 +714,11 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte {
|
||||
// However, bytes.NewBuffer normally allocates unless
|
||||
// we immediately shallow copy it into a pre-allocated Buffer struct.
|
||||
// See https://go.dev/issue/67004.
|
||||
l.bytesBuf = *bytes.NewBuffer(src)
|
||||
defer func() { l.bytesBuf = bytes.Buffer{} }() // avoid pinning src
|
||||
lg.bytesBuf = *bytes.NewBuffer(src)
|
||||
defer func() { lg.bytesBuf = bytes.Buffer{} }() // avoid pinning src
|
||||
|
||||
dec := &l.jsonDec
|
||||
dec.Reset(&l.bytesBuf)
|
||||
dec := &lg.jsonDec
|
||||
dec.Reset(&lg.bytesBuf)
|
||||
if tok, err := dec.ReadToken(); tok.Kind() != '{' || err != nil {
|
||||
return false
|
||||
}
|
||||
@ -750,7 +750,7 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte {
|
||||
|
||||
// Treat invalid JSON as a raw text message.
|
||||
if !validJSON {
|
||||
return l.appendText(dst, src, l.skipClientTime, l.procID, l.procSequence, level)
|
||||
return lg.appendText(dst, src, lg.skipClientTime, lg.procID, lg.procSequence, level)
|
||||
}
|
||||
|
||||
// Check whether the JSON payload is too large.
|
||||
@ -758,13 +758,13 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte {
|
||||
// That's okay as the Tailscale log service limit is actually 2*maxSize.
|
||||
// However, so long as logging applications aim to target the maxSize limit,
|
||||
// there should be no trouble eventually uploading logs.
|
||||
maxLen := cmp.Or(l.maxUploadSize, maxSize)
|
||||
maxLen := cmp.Or(lg.maxUploadSize, maxSize)
|
||||
if len(src) > maxLen {
|
||||
errDetail := fmt.Sprintf("entry too large: %d bytes", len(src))
|
||||
errData := appendTruncatedString(nil, src, maxLen/len(`\uffff`)) // escaping could increase size
|
||||
|
||||
dst = append(dst, '{')
|
||||
dst = l.appendMetadata(dst, l.skipClientTime, true, l.procID, l.procSequence, errDetail, errData, level)
|
||||
dst = lg.appendMetadata(dst, lg.skipClientTime, true, lg.procID, lg.procSequence, errDetail, errData, level)
|
||||
dst = bytes.TrimRight(dst, ",")
|
||||
return append(dst, "}\n"...)
|
||||
}
|
||||
@ -781,7 +781,7 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte {
|
||||
}
|
||||
dst = slices.Grow(dst, len(src))
|
||||
dst = append(dst, '{')
|
||||
dst = l.appendMetadata(dst, l.skipClientTime, true, l.procID, l.procSequence, errDetail, errData, level)
|
||||
dst = lg.appendMetadata(dst, lg.skipClientTime, true, lg.procID, lg.procSequence, errDetail, errData, level)
|
||||
if logtailValLength > 0 {
|
||||
// Exclude original logtail member from the message.
|
||||
dst = appendWithoutNewline(dst, src[len("{"):logtailKeyOffset])
|
||||
@ -808,8 +808,8 @@ func appendWithoutNewline(dst, src []byte) []byte {
|
||||
}
|
||||
|
||||
// Logf logs to l using the provided fmt-style format and optional arguments.
|
||||
func (l *Logger) Logf(format string, args ...any) {
|
||||
fmt.Fprintf(l, format, args...)
|
||||
func (lg *Logger) Logf(format string, args ...any) {
|
||||
fmt.Fprintf(lg, format, args...)
|
||||
}
|
||||
|
||||
// Write logs an encoded JSON blob.
|
||||
@ -818,29 +818,29 @@ func (l *Logger) Logf(format string, args ...any) {
|
||||
// then contents is fit into a JSON blob and written.
|
||||
//
|
||||
// This is intended as an interface for the stdlib "log" package.
|
||||
func (l *Logger) Write(buf []byte) (int, error) {
|
||||
func (lg *Logger) Write(buf []byte) (int, error) {
|
||||
if len(buf) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
inLen := len(buf) // length as provided to us, before modifications to downstream writers
|
||||
|
||||
level, buf := parseAndRemoveLogLevel(buf)
|
||||
if l.stderr != nil && l.stderr != io.Discard && int64(level) <= atomic.LoadInt64(&l.stderrLevel) {
|
||||
if lg.stderr != nil && lg.stderr != io.Discard && int64(level) <= atomic.LoadInt64(&lg.stderrLevel) {
|
||||
if buf[len(buf)-1] == '\n' {
|
||||
l.stderr.Write(buf)
|
||||
lg.stderr.Write(buf)
|
||||
} else {
|
||||
// The log package always line-terminates logs,
|
||||
// so this is an uncommon path.
|
||||
withNL := append(buf[:len(buf):len(buf)], '\n')
|
||||
l.stderr.Write(withNL)
|
||||
lg.stderr.Write(withNL)
|
||||
}
|
||||
}
|
||||
|
||||
l.writeLock.Lock()
|
||||
defer l.writeLock.Unlock()
|
||||
lg.writeLock.Lock()
|
||||
defer lg.writeLock.Unlock()
|
||||
|
||||
b := l.appendTextOrJSONLocked(l.writeBuf[:0], buf, level)
|
||||
_, err := l.sendLocked(b)
|
||||
b := lg.appendTextOrJSONLocked(lg.writeBuf[:0], buf, level)
|
||||
_, err := lg.sendLocked(b)
|
||||
return inLen, err
|
||||
}
|
||||
|
||||
|
||||
@ -29,11 +29,11 @@ func TestFastShutdown(t *testing.T) {
|
||||
func(w http.ResponseWriter, r *http.Request) {}))
|
||||
defer testServ.Close()
|
||||
|
||||
l := NewLogger(Config{
|
||||
logger := NewLogger(Config{
|
||||
BaseURL: testServ.URL,
|
||||
Bus: eventbustest.NewBus(t),
|
||||
}, t.Logf)
|
||||
err := l.Shutdown(ctx)
|
||||
err := logger.Shutdown(ctx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -64,7 +64,7 @@ func NewLogtailTestHarness(t *testing.T) (*LogtailTestServer, *Logger) {
|
||||
|
||||
t.Cleanup(ts.srv.Close)
|
||||
|
||||
l := NewLogger(Config{
|
||||
logger := NewLogger(Config{
|
||||
BaseURL: ts.srv.URL,
|
||||
Bus: eventbustest.NewBus(t),
|
||||
}, t.Logf)
|
||||
@ -75,14 +75,14 @@ func NewLogtailTestHarness(t *testing.T) (*LogtailTestServer, *Logger) {
|
||||
t.Errorf("unknown start logging statement: %q", string(body))
|
||||
}
|
||||
|
||||
return &ts, l
|
||||
return &ts, logger
|
||||
}
|
||||
|
||||
func TestDrainPendingMessages(t *testing.T) {
|
||||
ts, l := NewLogtailTestHarness(t)
|
||||
ts, logger := NewLogtailTestHarness(t)
|
||||
|
||||
for range logLines {
|
||||
l.Write([]byte("log line"))
|
||||
logger.Write([]byte("log line"))
|
||||
}
|
||||
|
||||
// all of the "log line" messages usually arrive at once, but poll if needed.
|
||||
@ -96,14 +96,14 @@ func TestDrainPendingMessages(t *testing.T) {
|
||||
// if we never find count == logLines, the test will eventually time out.
|
||||
}
|
||||
|
||||
err := l.Shutdown(context.Background())
|
||||
err := logger.Shutdown(context.Background())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeAndUploadMessages(t *testing.T) {
|
||||
ts, l := NewLogtailTestHarness(t)
|
||||
ts, logger := NewLogtailTestHarness(t)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
@ -123,7 +123,7 @@ func TestEncodeAndUploadMessages(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
io.WriteString(l, tt.log)
|
||||
io.WriteString(logger, tt.log)
|
||||
body := <-ts.uploaded
|
||||
|
||||
data := unmarshalOne(t, body)
|
||||
@ -144,7 +144,7 @@ func TestEncodeAndUploadMessages(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
err := l.Shutdown(context.Background())
|
||||
err := logger.Shutdown(context.Background())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -322,9 +322,9 @@ func TestLoggerWriteResult(t *testing.T) {
}

func TestAppendMetadata(t *testing.T) {
var l Logger
l.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)})
l.metricsDelta = func() string { return "metrics" }
var lg Logger
lg.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)})
lg.metricsDelta = func() string { return "metrics" }

for _, tt := range []struct {
skipClientTime bool
@ -350,7 +350,7 @@ func TestAppendMetadata(t *testing.T) {
{procID: 1, procSeq: 2, errDetail: "error", errData: jsontext.Value(`["something","bad","happened"]`), level: 2,
want: `"logtail":{"client_time":"2000-01-01T00:00:00Z","proc_id":1,"proc_seq":2,"error":{"detail":"error","bad_data":["something","bad","happened"]}},"metrics":"metrics","v":2,`},
} {
got := string(l.appendMetadata(nil, tt.skipClientTime, tt.skipMetrics, tt.procID, tt.procSeq, tt.errDetail, tt.errData, tt.level))
got := string(lg.appendMetadata(nil, tt.skipClientTime, tt.skipMetrics, tt.procID, tt.procSeq, tt.errDetail, tt.errData, tt.level))
if got != tt.want {
t.Errorf("appendMetadata(%v, %v, %v, %v, %v, %v, %v):\n\tgot %s\n\twant %s", tt.skipClientTime, tt.skipMetrics, tt.procID, tt.procSeq, tt.errDetail, tt.errData, tt.level, got, tt.want)
}
@ -362,10 +362,10 @@ func TestAppendMetadata(t *testing.T) {
}

func TestAppendText(t *testing.T) {
var l Logger
l.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)})
l.metricsDelta = func() string { return "metrics" }
l.lowMem = true
var lg Logger
lg.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)})
lg.metricsDelta = func() string { return "metrics" }
lg.lowMem = true

for _, tt := range []struct {
text string
@ -382,7 +382,7 @@ func TestAppendText(t *testing.T) {
{text: "\b\f\n\r\t\"\\", want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z"},"metrics":"metrics","text":"\b\f\n\r\t\"\\"}`},
{text: "x" + strings.Repeat("😐", maxSize), want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z"},"metrics":"metrics","text":"x` + strings.Repeat("😐", 1023) + `…+1044484"}`},
} {
got := string(l.appendText(nil, []byte(tt.text), tt.skipClientTime, tt.procID, tt.procSeq, tt.level))
got := string(lg.appendText(nil, []byte(tt.text), tt.skipClientTime, tt.procID, tt.procSeq, tt.level))
if !strings.HasSuffix(got, "\n") {
t.Errorf("`%s` does not end with a newline", got)
}
@ -397,10 +397,10 @@ func TestAppendText(t *testing.T) {
}

func TestAppendTextOrJSON(t *testing.T) {
var l Logger
l.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)})
l.metricsDelta = func() string { return "metrics" }
l.lowMem = true
var lg Logger
lg.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)})
lg.metricsDelta = func() string { return "metrics" }
lg.lowMem = true

for _, tt := range []struct {
in string
@ -419,7 +419,7 @@ func TestAppendTextOrJSON(t *testing.T) {
{in: `{ "fizz" : "buzz" , "logtail" : "duplicate" , "wizz" : "wuzz" }`, want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z","error":{"detail":"duplicate logtail member","bad_data":"duplicate"}}, "fizz" : "buzz" , "wizz" : "wuzz"}`},
{in: `{"long":"` + strings.Repeat("a", maxSize) + `"}`, want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z","error":{"detail":"entry too large: 262155 bytes","bad_data":"{\"long\":\"` + strings.Repeat("a", 43681) + `…+218465"}}}`},
} {
got := string(l.appendTextOrJSONLocked(nil, []byte(tt.in), tt.level))
got := string(lg.appendTextOrJSONLocked(nil, []byte(tt.in), tt.level))
if !strings.HasSuffix(got, "\n") {
t.Errorf("`%s` does not end with a newline", got)
}
@ -461,21 +461,21 @@ var testdataTextLog = []byte(`netcheck: report: udp=true v6=false v6os=true mapv
var testdataJSONLog = []byte(`{"end":"2024-04-08T21:39:15.715291586Z","nodeId":"nQRJBE7CNTRL","physicalTraffic":[{"dst":"127.x.x.x:2","src":"100.x.x.x:0","txBytes":148,"txPkts":1},{"dst":"127.x.x.x:2","src":"100.x.x.x:0","txBytes":148,"txPkts":1},{"dst":"98.x.x.x:1025","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"24.x.x.x:49973","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"73.x.x.x:41641","rxBytes":732,"rxPkts":6,"src":"100.x.x.x:0","txBytes":820,"txPkts":7},{"dst":"75.x.x.x:1025","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"75.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"174.x.x.x:35497","rxBytes":13008,"rxPkts":98,"src":"100.x.x.x:0","txBytes":26688,"txPkts":150},{"dst":"47.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"64.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5}],"start":"2024-04-08T21:39:11.099495616Z","virtualTraffic":[{"dst":"100.x.x.x:33008","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32984","proto":6,"src":"100.x.x.x:22","txBytes":1340,"txPkts":10},{"dst":"100.x.x.x:32998","proto":6,"src":"100.x.x.x:22","txBytes":1020,"txPkts":10},{"dst":"100.x.x.x:32994","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:32980","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32950","proto":6,"src":"100.x.x.x:22","txBytes":1340,"txPkts":10},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:53332","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:0","proto":1,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32966","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:57882","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:53326","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:57892","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:32934","proto":6,"src":"100.x.x.x:22","txBytes":8712,"txPkts":55},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32942","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32964","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:37238","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:37252","txBytes":60,"txPkts":1}]}`)
func BenchmarkWriteText(b *testing.B) {
var l Logger
l.clock = tstime.StdClock{}
l.buffer = discardBuffer{}
var lg Logger
lg.clock = tstime.StdClock{}
lg.buffer = discardBuffer{}
b.ReportAllocs()
for range b.N {
must.Get(l.Write(testdataTextLog))
must.Get(lg.Write(testdataTextLog))
}
}

func BenchmarkWriteJSON(b *testing.B) {
var l Logger
l.clock = tstime.StdClock{}
l.buffer = discardBuffer{}
var lg Logger
lg.clock = tstime.StdClock{}
lg.buffer = discardBuffer{}
b.ReportAllocs()
for range b.N {
must.Get(l.Write(testdataJSONLog))
must.Get(lg.Write(testdataJSONLog))
}
}
@ -303,21 +303,21 @@ func formatPrefixTable(addr uint8, len int) string {
//
// For example, childPrefixOf("192.168.0.0/16", 8) == "192.168.8.0/24".
func childPrefixOf(parent netip.Prefix, stride uint8) netip.Prefix {
l := parent.Bits()
if l%8 != 0 {
ln := parent.Bits()
if ln%8 != 0 {
panic("parent prefix is not 8-bit aligned")
}
if l >= parent.Addr().BitLen() {
if ln >= parent.Addr().BitLen() {
panic("parent prefix cannot be extended further")
}
off := l / 8
off := ln / 8
if parent.Addr().Is4() {
bs := parent.Addr().As4()
bs[off] = stride
return netip.PrefixFrom(netip.AddrFrom4(bs), l+8)
return netip.PrefixFrom(netip.AddrFrom4(bs), ln+8)
} else {
bs := parent.Addr().As16()
bs[off] = stride
return netip.PrefixFrom(netip.AddrFrom16(bs), l+8)
return netip.PrefixFrom(netip.AddrFrom16(bs), ln+8)
}
}

@ -377,8 +377,8 @@ func pfxMask(pfxLen int) uint8 {
func allPrefixes() []slowEntry[int] {
ret := make([]slowEntry[int], 0, lastHostIndex)
for i := 1; i < lastHostIndex+1; i++ {
a, l := inversePrefixIndex(i)
ret = append(ret, slowEntry[int]{a, l, i})
a, ln := inversePrefixIndex(i)
ret = append(ret, slowEntry[int]{a, ln, i})
}
return ret
}

@ -550,8 +550,8 @@ func genRandomSubdomains(t *testing.T, n int) []dnsname.FQDN {
const charset = "abcdefghijklmnopqrstuvwxyz"

for len(domains) < cap(domains) {
l := r.Intn(19) + 1
b := make([]byte, l)
ln := r.Intn(19) + 1
b := make([]byte, ln)
for i := range b {
b[i] = charset[r.Intn(len(charset))]
}
@ -19,11 +19,11 @@ func TestSetUserTimeout(t *testing.T) {
// set in ktimeout.UserTimeout above.
lc.SetMultipathTCP(false)

l := must.Get(lc.Listen(context.Background(), "tcp", "localhost:0"))
defer l.Close()
ln := must.Get(lc.Listen(context.Background(), "tcp", "localhost:0"))
defer ln.Close()

var err error
if e := must.Get(l.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) {
if e := must.Get(ln.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) {
err = SetUserTimeout(fd, 0)
}); e != nil {
t.Fatal(e)
@ -31,12 +31,12 @@ func TestSetUserTimeout(t *testing.T) {
if err != nil {
t.Fatal(err)
}
v := must.Get(unix.GetsockoptInt(int(must.Get(l.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT))
v := must.Get(unix.GetsockoptInt(int(must.Get(ln.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT))
if v != 0 {
t.Errorf("TCP_USER_TIMEOUT: got %v; want 0", v)
}

if e := must.Get(l.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) {
if e := must.Get(ln.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) {
err = SetUserTimeout(fd, 30*time.Second)
}); e != nil {
t.Fatal(e)
@ -44,7 +44,7 @@ func TestSetUserTimeout(t *testing.T) {
if err != nil {
t.Fatal(err)
}
v = must.Get(unix.GetsockoptInt(int(must.Get(l.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT))
v = must.Get(unix.GetsockoptInt(int(must.Get(ln.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT))
if v != 30000 {
t.Errorf("TCP_USER_TIMEOUT: got %v; want 30000", v)
}

@ -14,11 +14,11 @@ func ExampleUserTimeout() {
lc := net.ListenConfig{
Control: UserTimeout(30 * time.Second),
}
l, err := lc.Listen(context.TODO(), "tcp", "127.0.0.1:0")
ln, err := lc.Listen(context.TODO(), "tcp", "127.0.0.1:0")
if err != nil {
fmt.Printf("error: %v", err)
return
}
l.Close()
ln.Close()
// Output:
}
@ -39,16 +39,16 @@ func Listen(addr string) *Listener {
}

// Addr implements net.Listener.Addr.
func (l *Listener) Addr() net.Addr {
return l.addr
func (ln *Listener) Addr() net.Addr {
return ln.addr
}

// Close closes the pipe listener.
func (l *Listener) Close() error {
func (ln *Listener) Close() error {
var cleanup func()
l.closeOnce.Do(func() {
cleanup = l.onClose
close(l.closed)
ln.closeOnce.Do(func() {
cleanup = ln.onClose
close(ln.closed)
})
if cleanup != nil {
cleanup()
@ -57,11 +57,11 @@ func (l *Listener) Close() error {
}

// Accept blocks until a new connection is available or the listener is closed.
func (l *Listener) Accept() (net.Conn, error) {
func (ln *Listener) Accept() (net.Conn, error) {
select {
case c := <-l.ch:
case c := <-ln.ch:
return c, nil
case <-l.closed:
case <-ln.closed:
return nil, net.ErrClosed
}
}
@ -70,18 +70,18 @@ func (l *Listener) Accept() (net.Conn, error) {
// The provided Context must be non-nil. If the context expires before the
// connection is complete, an error is returned. Once successfully connected
// any expiration of the context will not affect the connection.
func (l *Listener) Dial(ctx context.Context, network, addr string) (_ net.Conn, err error) {
func (ln *Listener) Dial(ctx context.Context, network, addr string) (_ net.Conn, err error) {
if !strings.HasSuffix(network, "tcp") {
return nil, net.UnknownNetworkError(network)
}
if connAddr(addr) != l.addr {
if connAddr(addr) != ln.addr {
return nil, &net.AddrError{
Err:  "invalid address",
Addr: addr,
}
}

newConn := l.NewConn
newConn := ln.NewConn
if newConn == nil {
newConn = func(network, addr string, maxBuf int) (Conn, Conn) {
return NewConn(addr, maxBuf)
@ -98,9 +98,9 @@ func (l *Listener) Dial(ctx context.Context, network, addr string) (_ net.Conn,
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-l.closed:
case <-ln.closed:
return nil, net.ErrClosed
case l.ch <- s:
case ln.ch <- s:
return c, nil
}
}

@ -9,10 +9,10 @@ import (
)

func TestListener(t *testing.T) {
l := Listen("srv.local")
defer l.Close()
ln := Listen("srv.local")
defer ln.Close()
go func() {
c, err := l.Accept()
c, err := ln.Accept()
if err != nil {
t.Error(err)
return
@ -20,11 +20,11 @@ func TestListener(t *testing.T) {
defer c.Close()
}()

if c, err := l.Dial(context.Background(), "tcp", "invalid"); err == nil {
if c, err := ln.Dial(context.Background(), "tcp", "invalid"); err == nil {
c.Close()
t.Fatalf("dial to invalid address succeeded")
}
c, err := l.Dial(context.Background(), "tcp", "srv.local")
c, err := ln.Dial(context.Background(), "tcp", "srv.local")
if err != nil {
t.Fatalf("dial failed: %v", err)
return
@ -34,7 +34,7 @@ func FromStdIPNet(std *net.IPNet) (prefix netip.Prefix, ok bool) {
}
ip = ip.Unmap()

if l := len(std.Mask); l != net.IPv4len && l != net.IPv6len {
if ln := len(std.Mask); ln != net.IPv4len && ln != net.IPv6len {
// Invalid mask.
return netip.Prefix{}, false
}

@ -993,9 +993,9 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe
c.logf("[v1] netcheck: measuring HTTPS latency of %v (%d): %v", reg.RegionCode, reg.RegionID, err)
} else {
rs.mu.Lock()
if l, ok := rs.report.RegionLatency[reg.RegionID]; !ok {
if latency, ok := rs.report.RegionLatency[reg.RegionID]; !ok {
mak.Set(&rs.report.RegionLatency, reg.RegionID, d)
} else if l >= d {
} else if latency >= d {
rs.report.RegionLatency[reg.RegionID] = d
}
// We set these IPv4 and IPv6 but they're not really used
@ -1214,9 +1214,9 @@ func (c *Client) measureAllICMPLatency(ctx context.Context, rs *reportState, nee
} else if ok {
c.logf("[v1] ICMP latency of %v (%d): %v", reg.RegionCode, reg.RegionID, d)
rs.mu.Lock()
if l, ok := rs.report.RegionLatency[reg.RegionID]; !ok {
if latency, ok := rs.report.RegionLatency[reg.RegionID]; !ok {
mak.Set(&rs.report.RegionLatency, reg.RegionID, d)
} else if l >= d {
} else if latency >= d {
rs.report.RegionLatency[reg.RegionID] = d
}

@ -120,10 +120,10 @@ func (s *Server) logf(format string, args ...any) {
}

// Serve accepts and handles incoming connections on the given listener.
func (s *Server) Serve(l net.Listener) error {
defer l.Close()
func (s *Server) Serve(ln net.Listener) error {
defer ln.Close()
for {
c, err := l.Accept()
c, err := ln.Accept()
if err != nil {
return err
}
@ -17,9 +17,9 @@ import (
// connections and handles each one in a goroutine. Because it runs in an infinite loop,
// this function only returns if any of the speedtests return with errors, or if the
// listener is closed.
func Serve(l net.Listener) error {
func Serve(ln net.Listener) error {
for {
conn, err := l.Accept()
conn, err := ln.Accept()
if errors.Is(err, net.ErrClosed) {
return nil
}

@ -21,13 +21,13 @@ func TestDownload(t *testing.T) {
flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17338")

// start a listener and find the port where the server will be listening.
l, err := net.Listen("tcp", ":0")
ln, err := net.Listen("tcp", ":0")
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() { l.Close() })
t.Cleanup(func() { ln.Close() })

serverIP := l.Addr().String()
serverIP := ln.Addr().String()
t.Log("server IP found:", serverIP)

type state struct {
@ -40,7 +40,7 @@ func TestDownload(t *testing.T) {
stateChan := make(chan state, 1)

go func() {
err := Serve(l)
err := Serve(ln)
stateChan <- state{err: err}
}()

@ -84,7 +84,7 @@ func TestDownload(t *testing.T) {
})

// causes the server goroutine to finish
l.Close()
ln.Close()

testState := <-stateChan
if testState.err != nil {

@ -166,14 +166,14 @@ var (
func findArchAndVersion(control []byte) (arch string, version string, err error) {
b := bytes.NewBuffer(control)
for {
l, err := b.ReadBytes('\n')
ln, err := b.ReadBytes('\n')
if err != nil {
return "", "", err
}
if bytes.HasPrefix(l, archKey) {
arch = string(bytes.TrimSpace(l[len(archKey):]))
} else if bytes.HasPrefix(l, versionKey) {
version = string(bytes.TrimSpace(l[len(versionKey):]))
if bytes.HasPrefix(ln, archKey) {
arch = string(bytes.TrimSpace(ln[len(archKey):]))
} else if bytes.HasPrefix(ln, versionKey) {
version = string(bytes.TrimSpace(ln[len(versionKey):]))
}
if arch != "" && version != "" {
return arch, version, nil
@ -323,14 +323,14 @@ func (d *derpProber) probeBandwidth(from, to string, size int64) ProbeClass {
"derp_path": derpPath,
"tcp_in_tcp": strconv.FormatBool(d.bwTUNIPv4Prefix != nil),
},
Metrics: func(l prometheus.Labels) []prometheus.Metric {
Metrics: func(lb prometheus.Labels) []prometheus.Metric {
metrics := []prometheus.Metric{
prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_probe_size_bytes", "Payload size of the bandwidth prober", nil, l), prometheus.GaugeValue, float64(size)),
prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_transfer_time_seconds_total", "Time it took to transfer data", nil, l), prometheus.CounterValue, transferTimeSeconds.Value()),
prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_probe_size_bytes", "Payload size of the bandwidth prober", nil, lb), prometheus.GaugeValue, float64(size)),
prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_transfer_time_seconds_total", "Time it took to transfer data", nil, lb), prometheus.CounterValue, transferTimeSeconds.Value()),
}
if d.bwTUNIPv4Prefix != nil {
// For TCP-in-TCP probes, also record cumulative bytes transferred.
metrics = append(metrics, prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_bytes_total", "Amount of data transferred", nil, l), prometheus.CounterValue, totalBytesTransferred.Value()))
metrics = append(metrics, prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_bytes_total", "Amount of data transferred", nil, lb), prometheus.CounterValue, totalBytesTransferred.Value()))
}
return metrics
},
@ -361,11 +361,11 @@ func (d *derpProber) probeQueuingDelay(from, to string, packetsPerSecond int, pa
},
Class:  "derp_qd",
Labels: Labels{"derp_path": derpPath},
Metrics: func(l prometheus.Labels) []prometheus.Metric {
Metrics: func(lb prometheus.Labels) []prometheus.Metric {
qdh.mx.Lock()
result := []prometheus.Metric{
prometheus.MustNewConstMetric(prometheus.NewDesc("derp_qd_probe_dropped_packets", "Total packets dropped", nil, l), prometheus.CounterValue, float64(packetsDropped.Value())),
prometheus.MustNewConstHistogram(prometheus.NewDesc("derp_qd_probe_delays_seconds", "Distribution of queuing delays", nil, l), qdh.count, qdh.sum, maps.Clone(qdh.bucketedCounts)),
prometheus.MustNewConstMetric(prometheus.NewDesc("derp_qd_probe_dropped_packets", "Total packets dropped", nil, lb), prometheus.CounterValue, float64(packetsDropped.Value())),
prometheus.MustNewConstHistogram(prometheus.NewDesc("derp_qd_probe_delays_seconds", "Distribution of queuing delays", nil, lb), qdh.count, qdh.sum, maps.Clone(qdh.bucketedCounts)),
}
qdh.mx.Unlock()
return result
@ -1046,11 +1046,11 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT
}()

// Start a listener to receive the data
l, err := net.Listen("tcp", net.JoinHostPort(ifAddr.String(), "0"))
ln, err := net.Listen("tcp", net.JoinHostPort(ifAddr.String(), "0"))
if err != nil {
return fmt.Errorf("failed to listen: %s", err)
}
defer l.Close()
defer ln.Close()

// 128KB by default
const writeChunkSize = 128 << 10
@ -1062,9 +1062,9 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT
}

// Dial ourselves
_, port, err := net.SplitHostPort(l.Addr().String())
_, port, err := net.SplitHostPort(ln.Addr().String())
if err != nil {
return fmt.Errorf("failed to split address %q: %w", l.Addr().String(), err)
return fmt.Errorf("failed to split address %q: %w", ln.Addr().String(), err)
}

connAddr := net.JoinHostPort(destinationAddr.String(), port)
@ -1085,7 +1085,7 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT
go func() {
defer wg.Done()

readConn, err := l.Accept()
readConn, err := ln.Accept()
if err != nil {
readFinishedC <- err
return
@ -1146,11 +1146,11 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT

func newConn(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode, isProber bool, meshKey key.DERPMesh) (*derphttp.Client, error) {
// To avoid spamming the log with regular connection messages.
l := logger.Filtered(log.Printf, func(s string) bool {
logf := logger.Filtered(log.Printf, func(s string) bool {
return !strings.Contains(s, "derphttp.Client.Connect: connecting to")
})
priv := key.NewNode()
dc := derphttp.NewRegionClient(priv, l, netmon.NewStatic(), func() *tailcfg.DERPRegion {
dc := derphttp.NewRegionClient(priv, logf, netmon.NewStatic(), func() *tailcfg.DERPRegion {
rid := n.RegionID
return &tailcfg.DERPRegion{
RegionID: rid,
@ -118,25 +118,25 @@ func (p *Prober) Run(name string, interval time.Duration, labels Labels, pc Prob
panic(fmt.Sprintf("probe named %q already registered", name))
}

l := prometheus.Labels{
lb := prometheus.Labels{
"name":  name,
"class": pc.Class,
}
for k, v := range pc.Labels {
l[k] = v
lb[k] = v
}
for k, v := range labels {
l[k] = v
lb[k] = v
}

probe := newProbe(p, name, interval, l, pc)
probe := newProbe(p, name, interval, lb, pc)
p.probes[name] = probe
go probe.loop()
return probe
}

// newProbe creates a new Probe with the given parameters, but does not start it.
func newProbe(p *Prober, name string, interval time.Duration, l prometheus.Labels, pc ProbeClass) *Probe {
func newProbe(p *Prober, name string, interval time.Duration, lg prometheus.Labels, pc ProbeClass) *Probe {
ctx, cancel := context.WithCancel(context.Background())
probe := &Probe{
prober:  p,
@ -155,17 +155,17 @@ func newProbe(p *Prober, name string, interval time.Duration, l prometheus.Label
latencyHist: ring.New(recentHistSize),

metrics:      prometheus.NewRegistry(),
metricLabels: l,
mInterval:    prometheus.NewDesc("interval_secs", "Probe interval in seconds", nil, l),
mStartTime:   prometheus.NewDesc("start_secs", "Latest probe start time (seconds since epoch)", nil, l),
mEndTime:     prometheus.NewDesc("end_secs", "Latest probe end time (seconds since epoch)", nil, l),
mLatency:     prometheus.NewDesc("latency_millis", "Latest probe latency (ms)", nil, l),
mResult:      prometheus.NewDesc("result", "Latest probe result (1 = success, 0 = failure)", nil, l),
metricLabels: lg,
mInterval:    prometheus.NewDesc("interval_secs", "Probe interval in seconds", nil, lg),
mStartTime:   prometheus.NewDesc("start_secs", "Latest probe start time (seconds since epoch)", nil, lg),
mEndTime:     prometheus.NewDesc("end_secs", "Latest probe end time (seconds since epoch)", nil, lg),
mLatency:     prometheus.NewDesc("latency_millis", "Latest probe latency (ms)", nil, lg),
mResult:      prometheus.NewDesc("result", "Latest probe result (1 = success, 0 = failure)", nil, lg),
mAttempts: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "attempts_total", Help: "Total number of probing attempts", ConstLabels: l,
Name: "attempts_total", Help: "Total number of probing attempts", ConstLabels: lg,
}, []string{"status"}),
mSeconds: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "seconds_total", Help: "Total amount of time spent executing the probe", ConstLabels: l,
Name: "seconds_total", Help: "Total amount of time spent executing the probe", ConstLabels: lg,
}, []string{"status"}),
}
if p.metrics != nil {
@ -512,8 +512,8 @@ func (probe *Probe) probeInfoLocked() ProbeInfo {
inf.Latency = probe.latency
}
probe.latencyHist.Do(func(v any) {
if l, ok := v.(time.Duration); ok {
inf.RecentLatencies = append(inf.RecentLatencies, l)
if latency, ok := v.(time.Duration); ok {
inf.RecentLatencies = append(inf.RecentLatencies, latency)
}
})
probe.successHist.Do(func(v any) {
@ -719,8 +719,8 @@ func initialDelay(seed string, interval time.Duration) time.Duration {
// Labels is a set of metric labels used by a prober.
type Labels map[string]string

func (l Labels) With(k, v string) Labels {
new := maps.Clone(l)
func (lb Labels) With(k, v string) Labels {
new := maps.Clone(lb)
new[k] = v
return new
}
@ -31,8 +31,8 @@ func (h AUMHash) String() string {

// UnmarshalText implements encoding.TextUnmarshaler.
func (h *AUMHash) UnmarshalText(text []byte) error {
if l := base32StdNoPad.DecodedLen(len(text)); l != len(h) {
return fmt.Errorf("tka.AUMHash.UnmarshalText: text wrong length: %d, want %d", l, len(text))
if ln := base32StdNoPad.DecodedLen(len(text)); ln != len(h) {
return fmt.Errorf("tka.AUMHash.UnmarshalText: text wrong length: %d, want %d", ln, len(text))
}
if _, err := base32StdNoPad.Decode(h[:], text); err != nil {
return fmt.Errorf("tka.AUMHash.UnmarshalText: %w", err)

@ -76,8 +76,8 @@ func TestSigNested(t *testing.T) {
if err := nestedSig.verifySignature(oldNode.Public(), k); err != nil {
t.Fatalf("verifySignature(oldNode) failed: %v", err)
}
if l := sigChainLength(nestedSig); l != 1 {
t.Errorf("nestedSig chain length = %v, want 1", l)
if ln := sigChainLength(nestedSig); ln != 1 {
t.Errorf("nestedSig chain length = %v, want 1", ln)
}

// The signature authorizing the rotation, signed by the
@ -93,8 +93,8 @@ func TestSigNested(t *testing.T) {
if err := sig.verifySignature(node.Public(), k); err != nil {
t.Fatalf("verifySignature(node) failed: %v", err)
}
if l := sigChainLength(sig); l != 2 {
t.Errorf("sig chain length = %v, want 2", l)
if ln := sigChainLength(sig); ln != 2 {
t.Errorf("sig chain length = %v, want 2", ln)
}

// Test verification fails if the wrong verification key is provided

@ -92,8 +92,8 @@ func (m *monitor) handleSummaryStatus(w http.ResponseWriter, r *http.Request) {
}

slices.Sort(lines)
for _, l := range lines {
_, err = w.Write([]byte(fmt.Sprintf("%s\n", l)))
for _, ln := range lines {
_, err = w.Write([]byte(fmt.Sprintf("%s\n", ln)))
if err != nil {
log.Printf("monitor: error writing status: %v", err)
return

@ -75,10 +75,10 @@ func fromCommand(bs []byte) (string, error) {
return args, nil
}

func (f *fsm) Apply(l *raft.Log) any {
func (f *fsm) Apply(lg *raft.Log) any {
f.mu.Lock()
defer f.mu.Unlock()
s, err := fromCommand(l.Data)
s, err := fromCommand(lg.Data)
if err != nil {
return CommandResult{
Err: err,
@ -1021,11 +1021,11 @@ func promMetricLabelsStr(labels []*dto.LabelPair) string {
}
var b strings.Builder
b.WriteString("{")
for i, l := range labels {
for i, lb := range labels {
if i > 0 {
b.WriteString(",")
}
b.WriteString(fmt.Sprintf("%s=%q", l.GetName(), l.GetValue()))
b.WriteString(fmt.Sprintf("%s=%q", lb.GetName(), lb.GetValue()))
}
b.WriteString("}")
return b.String()
@ -1033,8 +1033,8 @@ func promMetricLabelsStr(labels []*dto.LabelPair) string {

// sendData sends a given amount of bytes from s1 to s2.
func sendData(logf func(format string, args ...any), ctx context.Context, bytesCount int, s1, s2 *Server, s1ip, s2ip netip.Addr) error {
l := must.Get(s1.Listen("tcp", fmt.Sprintf("%s:8081", s1ip)))
defer l.Close()
lb := must.Get(s1.Listen("tcp", fmt.Sprintf("%s:8081", s1ip)))
defer lb.Close()

// Dial to s1 from s2
w, err := s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", s1ip))
@ -1049,7 +1049,7 @@ func sendData(logf func(format string, args ...any), ctx context.Context, bytesC
defer close(allReceived)

go func() {
conn, err := l.Accept()
conn, err := lb.Accept()
if err != nil {
allReceived <- err
return

@ -184,14 +184,14 @@ type ipMapping struct {
// it is difficult to be 100% sure. This function should be used with care. It
// will probably do what you want, but it is very easy to hold this wrong.
func getProbablyFreePortNumber() (int, error) {
l, err := net.Listen("tcp", ":0")
ln, err := net.Listen("tcp", ":0")
if err != nil {
return 0, err
}

defer l.Close()
defer ln.Close()

_, port, err := net.SplitHostPort(l.Addr().String())
_, port, err := net.SplitHostPort(ln.Addr().String())
if err != nil {
return 0, err
}
@ -628,8 +628,8 @@ type loggingResponseWriter struct {
// from r, or falls back to logf. If a nil logger is given, the logs are
// discarded.
func newLogResponseWriter(logf logger.Logf, w http.ResponseWriter, r *http.Request) *loggingResponseWriter {
if l, ok := logger.LogfKey.ValueOk(r.Context()); ok && l != nil {
logf = l
if lg, ok := logger.LogfKey.ValueOk(r.Context()); ok && lg != nil {
logf = lg
}
if logf == nil {
logf = logger.Discard
@ -642,46 +642,46 @@ func newLogResponseWriter(logf logger.Logf, w http.ResponseWriter, r *http.Reque
}

// WriteHeader implements [http.ResponseWriter].
func (l *loggingResponseWriter) WriteHeader(statusCode int) {
if l.code != 0 {
l.logf("[unexpected] HTTP handler set statusCode twice (%d and %d)", l.code, statusCode)
func (lg *loggingResponseWriter) WriteHeader(statusCode int) {
if lg.code != 0 {
lg.logf("[unexpected] HTTP handler set statusCode twice (%d and %d)", lg.code, statusCode)
return
}
if l.ctx.Err() == nil {
l.code = statusCode
if lg.ctx.Err() == nil {
lg.code = statusCode
}
l.ResponseWriter.WriteHeader(statusCode)
lg.ResponseWriter.WriteHeader(statusCode)
}

// Write implements [http.ResponseWriter].
func (l *loggingResponseWriter) Write(bs []byte) (int, error) {
if l.code == 0 {
l.code = 200
func (lg *loggingResponseWriter) Write(bs []byte) (int, error) {
if lg.code == 0 {
lg.code = 200
}
n, err := l.ResponseWriter.Write(bs)
l.bytes += n
n, err := lg.ResponseWriter.Write(bs)
lg.bytes += n
return n, err
}

// Hijack implements http.Hijacker. Note that hijacking can still fail
// because the wrapped ResponseWriter is not required to implement
// Hijacker, as this breaks HTTP/2.
func (l *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
h, ok := l.ResponseWriter.(http.Hijacker)
func (lg *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
h, ok := lg.ResponseWriter.(http.Hijacker)
if !ok {
return nil, nil, errors.New("ResponseWriter is not a Hijacker")
}
conn, buf, err := h.Hijack()
if err == nil {
l.hijacked = true
lg.hijacked = true
}
return conn, buf, err
}

func (l loggingResponseWriter) Flush() {
f, _ := l.ResponseWriter.(http.Flusher)
func (lg loggingResponseWriter) Flush() {
f, _ := lg.ResponseWriter.(http.Flusher)
if f == nil {
l.logf("[unexpected] tried to Flush a ResponseWriter that can't flush")
lg.logf("[unexpected] tried to Flush a ResponseWriter that can't flush")
return
}
f.Flush()
@ -32,20 +32,20 @@ func TestPointAnonymize(t *testing.T) {
last := geo.MakePoint(llat, 0)
cur := geo.MakePoint(lat, 0)
anon := cur.Quantize()
switch l, g, err := anon.LatLng(); {
switch latlng, g, err := anon.LatLng(); {
case err != nil:
t.Fatal(err)
case lat == southPole:
// initialize llng, to the first snapped longitude
llat = l
llat = latlng
goto Lng
case g != 0:
t.Fatalf("%v is west or east of %v", anon, last)
case l < llat:
case latlng < llat:
t.Fatalf("%v is south of %v", anon, last)
case l == llat:
case latlng == llat:
continue
case l > llat:
case latlng > llat:
switch dist, err := last.DistanceTo(anon); {
case err != nil:
t.Fatal(err)
@ -55,7 +55,7 @@ func TestPointAnonymize(t *testing.T) {
t.Logf("lat=%v last=%v cur=%v anon=%v", lat, last, cur, anon)
t.Fatalf("%v is too close to %v", anon, last)
default:
llat = l
llat = latlng
}
}

@ -65,14 +65,14 @@ func TestPointAnonymize(t *testing.T) {
last := geo.MakePoint(llat, llng)
cur := geo.MakePoint(lat, lng)
anon := cur.Quantize()
switch l, g, err := anon.LatLng(); {
switch latlng, g, err := anon.LatLng(); {
case err != nil:
t.Fatal(err)
case lng == dateLine:
// initialize llng, to the first snapped longitude
llng = g
continue
case l != llat:
case latlng != llat:
t.Fatalf("%v is north or south of %v", anon, last)
case g != llng:
const tolerance = geo.MinSeparation * 0x1p-9

@ -167,11 +167,11 @@ func (k DiscoPublic) String() string {
}

// Compare returns an integer comparing DiscoPublic k and l lexicographically.
// The result will be 0 if k == l, -1 if k < l, and +1 if k > l. This is useful
// for situations requiring only one node in a pair to perform some operation,
// e.g. probing UDP path lifetime.
func (k DiscoPublic) Compare(l DiscoPublic) int {
return bytes.Compare(k.k[:], l.k[:])
// The result will be 0 if k == other, -1 if k < other, and +1 if k > other.
// This is useful for situations requiring only one node in a pair to perform
// some operation, e.g. probing UDP path lifetime.
func (k DiscoPublic) Compare(other DiscoPublic) int {
return bytes.Compare(k.k[:], other.k[:])
}

// AppendText implements encoding.TextAppender.
@ -45,36 +45,36 @@ func ListWithOpts[T ImmutableType](opts ...Options) List[T] {
// SetValue configures the preference with the specified value.
// It fails and returns [ErrManaged] if p is a managed preference,
// and [ErrReadOnly] if p is a read-only preference.
func (l *List[T]) SetValue(val []T) error {
return l.preference.SetValue(cloneSlice(val))
func (ls *List[T]) SetValue(val []T) error {
return ls.preference.SetValue(cloneSlice(val))
}

// SetManagedValue configures the preference with the specified value
// and marks the preference as managed.
func (l *List[T]) SetManagedValue(val []T) {
l.preference.SetManagedValue(cloneSlice(val))
func (ls *List[T]) SetManagedValue(val []T) {
ls.preference.SetManagedValue(cloneSlice(val))
}

// View returns a read-only view of l.
func (l *List[T]) View() ListView[T] {
return ListView[T]{l}
func (ls *List[T]) View() ListView[T] {
return ListView[T]{ls}
}

// Clone returns a copy of l that aliases no memory with l.
func (l List[T]) Clone() *List[T] {
res := ptr.To(l)
if v, ok := l.s.Value.GetOk(); ok {
func (ls List[T]) Clone() *List[T] {
res := ptr.To(ls)
if v, ok := ls.s.Value.GetOk(); ok {
res.s.Value.Set(append(v[:0:0], v...))
}
return res
}

// Equal reports whether l and l2 are equal.
func (l List[T]) Equal(l2 List[T]) bool {
if l.s.Metadata != l2.s.Metadata {
func (ls List[T]) Equal(l2 List[T]) bool {
if ls.s.Metadata != l2.s.Metadata {
return false
}
v1, ok1 := l.s.Value.GetOk()
v1, ok1 := ls.s.Value.GetOk()
v2, ok2 := l2.s.Value.GetOk()
if ok1 != ok2 {
return false

@ -487,31 +487,31 @@ func TestItemView(t *testing.T) {
}

func TestListView(t *testing.T) {
l := ListOf([]int{4, 8, 15, 16, 23, 42}, ReadOnly)
ls := ListOf([]int{4, 8, 15, 16, 23, 42}, ReadOnly)

lv := l.View()
lv := ls.View()
checkIsSet(t, lv, true)
checkIsManaged(t, lv, false)
checkIsReadOnly(t, lv, true)
checkValue(t, lv, views.SliceOf(l.Value()))
checkValueOk(t, lv, views.SliceOf(l.Value()), true)
checkValue(t, lv, views.SliceOf(ls.Value()))
checkValueOk(t, lv, views.SliceOf(ls.Value()), true)

l2 := *lv.AsStruct()
checkEqual(t, l, l2, true)
checkEqual(t, ls, l2, true)
}

func TestStructListView(t *testing.T) {
l := StructListOf([]*TestBundle{{Name: "E1"}, {Name: "E2"}}, ReadOnly)
ls := StructListOf([]*TestBundle{{Name: "E1"}, {Name: "E2"}}, ReadOnly)

lv := StructListViewOf(&l)
lv := StructListViewOf(&ls)
checkIsSet(t, lv, true)
checkIsManaged(t, lv, false)
checkIsReadOnly(t, lv, true)
checkValue(t, lv, views.SliceOfViews(l.Value()))
checkValueOk(t, lv, views.SliceOfViews(l.Value()), true)
checkValue(t, lv, views.SliceOfViews(ls.Value()))
checkValueOk(t, lv, views.SliceOfViews(ls.Value()), true)

l2 := *lv.AsStruct()
checkEqual(t, l, l2, true)
checkEqual(t, ls, l2, true)
}

func TestStructMapView(t *testing.T) {
@ -33,20 +33,20 @@ func StructListWithOpts[T views.Cloner[T]](opts ...Options) StructList[T] {
// SetValue configures the preference with the specified value.
// It fails and returns [ErrManaged] if p is a managed preference,
// and [ErrReadOnly] if p is a read-only preference.
func (l *StructList[T]) SetValue(val []T) error {
return l.preference.SetValue(deepCloneSlice(val))
func (ls *StructList[T]) SetValue(val []T) error {
return ls.preference.SetValue(deepCloneSlice(val))
}

// SetManagedValue configures the preference with the specified value
// and marks the preference as managed.
func (l *StructList[T]) SetManagedValue(val []T) {
l.preference.SetManagedValue(deepCloneSlice(val))
func (ls *StructList[T]) SetManagedValue(val []T) {
ls.preference.SetManagedValue(deepCloneSlice(val))
}

// Clone returns a copy of l that aliases no memory with l.
func (l StructList[T]) Clone() *StructList[T] {
res := ptr.To(l)
if v, ok := l.s.Value.GetOk(); ok {
func (ls StructList[T]) Clone() *StructList[T] {
res := ptr.To(ls)
if v, ok := ls.s.Value.GetOk(); ok {
res.s.Value.Set(deepCloneSlice(v))
}
return res
@ -56,11 +56,11 @@ func (l StructList[T]) Clone() *StructList[T] {
// If the template type T implements an Equal(T) bool method, it will be used
// instead of the == operator for value comparison.
// It panics if T is not comparable.
func (l StructList[T]) Equal(l2 StructList[T]) bool {
if l.s.Metadata != l2.s.Metadata {
func (ls StructList[T]) Equal(l2 StructList[T]) bool {
if ls.s.Metadata != l2.s.Metadata {
return false
}
v1, ok1 := l.s.Value.GetOk()
v1, ok1 := ls.s.Value.GetOk()
v2, ok2 := l2.s.Value.GetOk()
if ok1 != ok2 {
return false
@ -105,8 +105,8 @@ type StructListView[T views.ViewCloner[T, V], V views.StructView[T]] struct {

// StructListViewOf returns a read-only view of l.
// It is used by [tailscale.com/cmd/viewer].
func StructListViewOf[T views.ViewCloner[T, V], V views.StructView[T]](l *StructList[T]) StructListView[T, V] {
return StructListView[T, V]{l}
func StructListViewOf[T views.ViewCloner[T, V], V views.StructView[T]](ls *StructList[T]) StructListView[T, V] {
return StructListView[T, V]{ls}
}

// Valid reports whether the underlying [StructList] is non-nil.

@ -31,14 +31,14 @@ func StructMapWithOpts[K MapKeyType, V views.Cloner[V]](opts ...Options) StructM
// SetValue configures the preference with the specified value.
// It fails and returns [ErrManaged] if p is a managed preference,
// and [ErrReadOnly] if p is a read-only preference.
func (l *StructMap[K, V]) SetValue(val map[K]V) error {
return l.preference.SetValue(deepCloneMap(val))
func (m *StructMap[K, V]) SetValue(val map[K]V) error {
return m.preference.SetValue(deepCloneMap(val))
}

// SetManagedValue configures the preference with the specified value
// and marks the preference as managed.
func (l *StructMap[K, V]) SetManagedValue(val map[K]V) {
l.preference.SetManagedValue(deepCloneMap(val))
func (m *StructMap[K, V]) SetManagedValue(val map[K]V) {
m.preference.SetManagedValue(deepCloneMap(val))
}

// Clone returns a copy of m that aliases no memory with m.
@ -94,59 +94,59 @@ type bucket struct {

// Allow charges the key one token (up to the overdraft limit), and
// reports whether the key can perform an action.
func (l *Limiter[K]) Allow(key K) bool {
return l.allow(key, time.Now())
func (lm *Limiter[K]) Allow(key K) bool {
return lm.allow(key, time.Now())
}

func (l *Limiter[K]) allow(key K, now time.Time) bool {
l.mu.Lock()
defer l.mu.Unlock()
return l.allowBucketLocked(l.getBucketLocked(key, now), now)
func (lm *Limiter[K]) allow(key K, now time.Time) bool {
lm.mu.Lock()
defer lm.mu.Unlock()
return lm.allowBucketLocked(lm.getBucketLocked(key, now), now)
}

func (l *Limiter[K]) getBucketLocked(key K, now time.Time) *bucket {
if l.cache == nil {
l.cache = &lru.Cache[K, *bucket]{MaxEntries: l.Size}
} else if b := l.cache.Get(key); b != nil {
func (lm *Limiter[K]) getBucketLocked(key K, now time.Time) *bucket {
if lm.cache == nil {
lm.cache = &lru.Cache[K, *bucket]{MaxEntries: lm.Size}
} else if b := lm.cache.Get(key); b != nil {
return b
}
b := &bucket{
cur:        l.Max,
lastUpdate: now.Truncate(l.RefillInterval),
cur:        lm.Max,
lastUpdate: now.Truncate(lm.RefillInterval),
}
l.cache.Set(key, b)
lm.cache.Set(key, b)
return b
}

func (l *Limiter[K]) allowBucketLocked(b *bucket, now time.Time) bool {
func (lm *Limiter[K]) allowBucketLocked(b *bucket, now time.Time) bool {
// Only update the bucket quota if needed to process request.
if b.cur <= 0 {
l.updateBucketLocked(b, now)
lm.updateBucketLocked(b, now)
}
ret := b.cur > 0
if b.cur > -l.Overdraft {
if b.cur > -lm.Overdraft {
b.cur--
}
return ret
}

func (l *Limiter[K]) updateBucketLocked(b *bucket, now time.Time) {
now = now.Truncate(l.RefillInterval)
func (lm *Limiter[K]) updateBucketLocked(b *bucket, now time.Time) {
now = now.Truncate(lm.RefillInterval)
if now.Before(b.lastUpdate) {
return
}
timeDelta := max(now.Sub(b.lastUpdate), 0)
tokenDelta := int64(timeDelta / l.RefillInterval)
b.cur = min(b.cur+tokenDelta, l.Max)
tokenDelta := int64(timeDelta / lm.RefillInterval)
b.cur = min(b.cur+tokenDelta, lm.Max)
b.lastUpdate = now
}

// peekForTest returns the number of tokens for key, also reporting
// whether key was present.
func (l *Limiter[K]) tokensForTest(key K) (int64, bool) {
l.mu.Lock()
defer l.mu.Unlock()
if b, ok := l.cache.PeekOk(key); ok {
func (lm *Limiter[K]) tokensForTest(key K) (int64, bool) {
lm.mu.Lock()
defer lm.mu.Unlock()
if b, ok := lm.cache.PeekOk(key); ok {
return b.cur, true
}
return 0, false
@ -159,12 +159,12 @@ func (l *Limiter[K]) tokensForTest(key K) (int64, bool) {
// DumpHTML blocks other callers of the limiter while it collects the
// state for dumping. It should not be called on large limiters
// involved in hot codepaths.
func (l *Limiter[K]) DumpHTML(w io.Writer, onlyLimited bool) {
l.dumpHTML(w, onlyLimited, time.Now())
func (lm *Limiter[K]) DumpHTML(w io.Writer, onlyLimited bool) {
lm.dumpHTML(w, onlyLimited, time.Now())
}

func (l *Limiter[K]) dumpHTML(w io.Writer, onlyLimited bool, now time.Time) {
dump := l.collectDump(now)
func (lm *Limiter[K]) dumpHTML(w io.Writer, onlyLimited bool, now time.Time) {
dump := lm.collectDump(now)
io.WriteString(w, "<table><tr><th>Key</th><th>Tokens</th></tr>")
for _, line := range dump {
if onlyLimited && line.Tokens > 0 {
@ -183,13 +183,13 @@ func (l *Limiter[K]) dumpHTML(w io.Writer, onlyLimited bool, now time.Time) {
}

// collectDump grabs a copy of the limiter state needed by DumpHTML.
func (l *Limiter[K]) collectDump(now time.Time) []dumpEntry[K] {
l.mu.Lock()
defer l.mu.Unlock()
func (lm *Limiter[K]) collectDump(now time.Time) []dumpEntry[K] {
lm.mu.Lock()
defer lm.mu.Unlock()

ret := make([]dumpEntry[K], 0, l.cache.Len())
l.cache.ForEach(func(k K, v *bucket) {
l.updateBucketLocked(v, now) // so stats are accurate
ret := make([]dumpEntry[K], 0, lm.cache.Len())
lm.cache.ForEach(func(k K, v *bucket) {
lm.updateBucketLocked(v, now) // so stats are accurate
ret = append(ret, dumpEntry[K]{k, v.cur})
})
return ret
@ -16,7 +16,7 @@ const testRefillInterval = time.Second
|
||||
|
||||
func TestLimiter(t *testing.T) {
|
||||
// 1qps, burst of 10, 2 keys tracked
|
||||
l := &Limiter[string]{
|
||||
limiter := &Limiter[string]{
|
||||
Size: 2,
|
||||
Max: 10,
|
||||
RefillInterval: testRefillInterval,
|
||||
@ -24,48 +24,48 @@ func TestLimiter(t *testing.T) {
|
||||
|
||||
// Consume entire burst
|
||||
now := time.Now().Truncate(testRefillInterval)
|
||||
allowed(t, l, "foo", 10, now)
|
||||
denied(t, l, "foo", 1, now)
|
||||
hasTokens(t, l, "foo", 0)
|
||||
allowed(t, limiter, "foo", 10, now)
|
||||
denied(t, limiter, "foo", 1, now)
|
||||
hasTokens(t, limiter, "foo", 0)
|
||||
|
||||
allowed(t, l, "bar", 10, now)
|
||||
denied(t, l, "bar", 1, now)
|
||||
hasTokens(t, l, "bar", 0)
|
||||
allowed(t, limiter, "bar", 10, now)
|
||||
denied(t, limiter, "bar", 1, now)
|
||||
hasTokens(t, limiter, "bar", 0)
|
||||
|
||||
// Refill 1 token for both foo and bar
|
||||
now = now.Add(time.Second + time.Millisecond)
|
||||
allowed(t, l, "foo", 1, now)
|
||||
denied(t, l, "foo", 1, now)
|
||||
hasTokens(t, l, "foo", 0)
|
||||
allowed(t, limiter, "foo", 1, now)
|
||||
denied(t, limiter, "foo", 1, now)
|
||||
hasTokens(t, limiter, "foo", 0)
|
||||
|
||||
allowed(t, l, "bar", 1, now)
|
||||
denied(t, l, "bar", 1, now)
|
||||
hasTokens(t, l, "bar", 0)
|
||||
allowed(t, limiter, "bar", 1, now)
|
||||
denied(t, limiter, "bar", 1, now)
|
||||
hasTokens(t, limiter, "bar", 0)
|
||||
|
||||
// Refill 2 tokens for foo and bar
|
||||
now = now.Add(2*time.Second + time.Millisecond)
|
||||
allowed(t, l, "foo", 2, now)
|
||||
denied(t, l, "foo", 1, now)
|
||||
hasTokens(t, l, "foo", 0)
|
||||
allowed(t, limiter, "foo", 2, now)
|
||||
denied(t, limiter, "foo", 1, now)
|
||||
hasTokens(t, limiter, "foo", 0)
|
||||
|
||||
allowed(t, l, "bar", 2, now)
|
||||
denied(t, l, "bar", 1, now)
|
||||
hasTokens(t, l, "bar", 0)
|
||||
allowed(t, limiter, "bar", 2, now)
|
||||
denied(t, limiter, "bar", 1, now)
|
||||
hasTokens(t, limiter, "bar", 0)
|
||||
|
||||
// qux can burst 10, evicts foo so it can immediately burst 10 again too
|
||||
allowed(t, l, "qux", 10, now)
|
||||
denied(t, l, "qux", 1, now)
|
||||
notInLimiter(t, l, "foo")
|
||||
denied(t, l, "bar", 1, now) // refresh bar so foo lookup doesn't evict it - still throttled
|
||||
allowed(t, limiter, "qux", 10, now)
|
||||
denied(t, limiter, "qux", 1, now)
|
||||
notInLimiter(t, limiter, "foo")
|
||||
denied(t, limiter, "bar", 1, now) // refresh bar so foo lookup doesn't evict it - still throttled
|
||||
|
||||
allowed(t, l, "foo", 10, now)
|
||||
denied(t, l, "foo", 1, now)
|
||||
hasTokens(t, l, "foo", 0)
|
||||
allowed(t, limiter, "foo", 10, now)
|
||||
denied(t, limiter, "foo", 1, now)
|
||||
hasTokens(t, limiter, "foo", 0)
|
||||
}
|
||||
|
||||
func TestLimiterOverdraft(t *testing.T) {
|
||||
// 1qps, burst of 10, overdraft of 2, 2 keys tracked
|
||||
l := &Limiter[string]{
|
||||
limiter := &Limiter[string]{
|
||||
Size: 2,
|
||||
Max: 10,
|
||||
Overdraft: 2,
|
||||
@ -74,51 +74,51 @@ func TestLimiterOverdraft(t *testing.T) {
|
||||
|
||||
// Consume entire burst, go 1 into debt
|
||||
now := time.Now().Truncate(testRefillInterval).Add(time.Millisecond)
|
||||
allowed(t, l, "foo", 10, now)
|
||||
denied(t, l, "foo", 1, now)
|
||||
hasTokens(t, l, "foo", -1)
|
||||
allowed(t, limiter, "foo", 10, now)
|
||||
denied(t, limiter, "foo", 1, now)
|
||||
hasTokens(t, limiter, "foo", -1)
|
||||
|
||||
allowed(t, l, "bar", 10, now)
|
||||
denied(t, l, "bar", 1, now)
|
||||
hasTokens(t, l, "bar", -1)
|
||||
allowed(t, limiter, "bar", 10, now)
|
||||
denied(t, limiter, "bar", 1, now)
|
||||
hasTokens(t, limiter, "bar", -1)
|
||||
|
||||
// Refill 1 token for both foo and bar.
|
||||
// Still denied, still in debt.
|
||||
now = now.Add(time.Second)
|
||||
denied(t, l, "foo", 1, now)
|
||||
hasTokens(t, l, "foo", -1)
|
||||
denied(t, l, "bar", 1, now)
|
||||
hasTokens(t, l, "bar", -1)
|
||||
denied(t, limiter, "foo", 1, now)
|
||||
hasTokens(t, limiter, "foo", -1)
|
||||
denied(t, limiter, "bar", 1, now)
|
||||
hasTokens(t, limiter, "bar", -1)
|
||||
|
||||
// Refill 2 tokens for foo and bar (1 available after debt), try
|
||||
// to consume 4. Overdraft is capped to 2.
|
||||
now = now.Add(2 * time.Second)
|
||||
allowed(t, l, "foo", 1, now)
|
||||
denied(t, l, "foo", 3, now)
|
||||
hasTokens(t, l, "foo", -2)
|
||||
allowed(t, limiter, "foo", 1, now)
|
||||
denied(t, limiter, "foo", 3, now)
|
||||
hasTokens(t, limiter, "foo", -2)
|
||||
|
||||
allowed(t, l, "bar", 1, now)
|
||||
denied(t, l, "bar", 3, now)
|
||||
hasTokens(t, l, "bar", -2)
|
||||
allowed(t, limiter, "bar", 1, now)
|
||||
denied(t, limiter, "bar", 3, now)
|
||||
hasTokens(t, limiter, "bar", -2)
|
||||
|
||||
// Refill 1, not enough to allow.
|
||||
now = now.Add(time.Second)
|
||||
denied(t, l, "foo", 1, now)
|
||||
hasTokens(t, l, "foo", -2)
|
||||
denied(t, l, "bar", 1, now)
|
||||
hasTokens(t, l, "bar", -2)
|
||||
denied(t, limiter, "foo", 1, now)
|
||||
hasTokens(t, limiter, "foo", -2)
|
||||
denied(t, limiter, "bar", 1, now)
|
||||
hasTokens(t, limiter, "bar", -2)
|
||||
|
||||
// qux evicts foo, foo can immediately burst 10 again.
|
||||
allowed(t, l, "qux", 1, now)
|
||||
hasTokens(t, l, "qux", 9)
|
||||
notInLimiter(t, l, "foo")
|
||||
allowed(t, l, "foo", 10, now)
|
||||
denied(t, l, "foo", 1, now)
|
||||
hasTokens(t, l, "foo", -1)
|
||||
allowed(t, limiter, "qux", 1, now)
|
||||
hasTokens(t, limiter, "qux", 9)
|
||||
notInLimiter(t, limiter, "foo")
|
||||
allowed(t, limiter, "foo", 10, now)
|
||||
denied(t, limiter, "foo", 1, now)
|
||||
hasTokens(t, limiter, "foo", -1)
|
||||
}

func TestDumpHTML(t *testing.T) {
l := &Limiter[string]{
limiter := &Limiter[string]{
Size: 3,
Max: 10,
Overdraft: 10,
@ -126,13 +126,13 @@ func TestDumpHTML(t *testing.T) {
}

now := time.Now().Truncate(testRefillInterval).Add(time.Millisecond)
allowed(t, l, "foo", 10, now)
denied(t, l, "foo", 2, now)
allowed(t, l, "bar", 4, now)
allowed(t, l, "qux", 1, now)
allowed(t, limiter, "foo", 10, now)
denied(t, limiter, "foo", 2, now)
allowed(t, limiter, "bar", 4, now)
allowed(t, limiter, "qux", 1, now)

var out bytes.Buffer
l.DumpHTML(&out, false)
limiter.DumpHTML(&out, false)
want := strings.Join([]string{
"<table>",
"<tr><th>Key</th><th>Tokens</th></tr>",
@ -146,7 +146,7 @@ func TestDumpHTML(t *testing.T) {
}

out.Reset()
l.DumpHTML(&out, true)
limiter.DumpHTML(&out, true)
want = strings.Join([]string{
"<table>",
"<tr><th>Key</th><th>Tokens</th></tr>",
@ -161,7 +161,7 @@ func TestDumpHTML(t *testing.T) {
// organically.
now = now.Add(3 * time.Second)
out.Reset()
l.dumpHTML(&out, false, now)
limiter.dumpHTML(&out, false, now)
want = strings.Join([]string{
"<table>",
"<tr><th>Key</th><th>Tokens</th></tr>",
@ -175,29 +175,29 @@ func TestDumpHTML(t *testing.T) {
}
}

func allowed(t *testing.T, l *Limiter[string], key string, count int, now time.Time) {
func allowed(t *testing.T, limiter *Limiter[string], key string, count int, now time.Time) {
t.Helper()
for i := range count {
if !l.allow(key, now) {
toks, ok := l.tokensForTest(key)
if !limiter.allow(key, now) {
toks, ok := limiter.tokensForTest(key)
t.Errorf("after %d times: allow(%q, %q) = false, want true (%d tokens available, in cache = %v)", i, key, now, toks, ok)
}
}
}

func denied(t *testing.T, l *Limiter[string], key string, count int, now time.Time) {
func denied(t *testing.T, limiter *Limiter[string], key string, count int, now time.Time) {
t.Helper()
for i := range count {
if l.allow(key, now) {
toks, ok := l.tokensForTest(key)
if limiter.allow(key, now) {
toks, ok := limiter.tokensForTest(key)
t.Errorf("after %d times: allow(%q, %q) = true, want false (%d tokens available, in cache = %v)", i, key, now, toks, ok)
}
}
}

func hasTokens(t *testing.T, l *Limiter[string], key string, want int64) {
func hasTokens(t *testing.T, limiter *Limiter[string], key string, want int64) {
t.Helper()
got, ok := l.tokensForTest(key)
got, ok := limiter.tokensForTest(key)
if !ok {
t.Errorf("key %q missing from limiter", key)
} else if got != want {
@ -205,9 +205,9 @@ func hasTokens(t *testing.T, l *Limiter[string], key string, want int64) {
}
}

func notInLimiter(t *testing.T, l *Limiter[string], key string) {
func notInLimiter(t *testing.T, limiter *Limiter[string], key string) {
t.Helper()
if tokens, ok := l.tokensForTest(key); ok {
if tokens, ok := limiter.tokensForTest(key); ok {
t.Errorf("key %q unexpectedly tracked by limiter, with %d tokens", key, tokens)
}
}

@ -85,7 +85,7 @@ type tableDetector interface {
type linuxFWDetector struct{}

// iptDetect returns the number of iptables rules in the current namespace.
func (l linuxFWDetector) iptDetect() (int, error) {
func (ld linuxFWDetector) iptDetect() (int, error) {
return detectIptables()
}

@ -96,7 +96,7 @@ var hookDetectNetfilter feature.Hook[func() (int, error)]
var ErrUnsupported = errors.New("linuxfw:unsupported")

// nftDetect returns the number of nftables rules in the current namespace.
func (l linuxFWDetector) nftDetect() (int, error) {
func (ld linuxFWDetector) nftDetect() (int, error) {
if f, ok := hookDetectNetfilter.GetOk(); ok {
return f()
}

@ -84,8 +84,8 @@ func TestStressEvictions(t *testing.T) {
for range numProbes {
v := vals[rand.Intn(len(vals))]
c.Set(v, true)
if l := c.Len(); l > cacheSize {
t.Fatalf("Cache size now %d, want max %d", l, cacheSize)
if ln := c.Len(); ln > cacheSize {
t.Fatalf("Cache size now %d, want max %d", ln, cacheSize)
}
}
}
@ -119,8 +119,8 @@ func TestStressBatchedEvictions(t *testing.T) {
c.DeleteOldest()
}
}
if l := c.Len(); l > cacheSizeMax {
t.Fatalf("Cache size now %d, want max %d", l, cacheSizeMax)
if ln := c.Len(); ln > cacheSizeMax {
t.Fatalf("Cache size now %d, want max %d", ln, cacheSizeMax)
}
}
}

@ -322,33 +322,33 @@ func Definitions() ([]*Definition, error) {
type PlatformList []string

// Has reports whether l contains the target platform.
func (l PlatformList) Has(target string) bool {
if len(l) == 0 {
func (ls PlatformList) Has(target string) bool {
if len(ls) == 0 {
return true
}
return slices.ContainsFunc(l, func(os string) bool {
return slices.ContainsFunc(ls, func(os string) bool {
return strings.EqualFold(os, target)
})
}

// HasCurrent is like Has, but for the current platform.
func (l PlatformList) HasCurrent() bool {
return l.Has(internal.OS())
func (ls PlatformList) HasCurrent() bool {
return ls.Has(internal.OS())
}

// mergeFrom merges l2 into l. Since an empty list indicates no platform restrictions,
// if either l or l2 is empty, the merged result in l will also be empty.
func (l *PlatformList) mergeFrom(l2 PlatformList) {
func (ls *PlatformList) mergeFrom(l2 PlatformList) {
switch {
case len(*l) == 0:
case len(*ls) == 0:
// No-op. An empty list indicates no platform restrictions.
case len(l2) == 0:
// Merging with an empty list results in an empty list.
*l = l2
*ls = l2
default:
// Append, sort and dedup.
*l = append(*l, l2...)
slices.Sort(*l)
*l = slices.Compact(*l)
*ls = append(*ls, l2...)
slices.Sort(*ls)
*ls = slices.Compact(*ls)
}
}

@ -311,8 +311,8 @@ func TestListSettingDefinitions(t *testing.T) {
t.Fatalf("SetDefinitionsForTest failed: %v", err)
}

cmp := func(l, r *Definition) int {
return strings.Compare(string(l.Key()), string(r.Key()))
cmp := func(a, b *Definition) int {
return strings.Compare(string(a.Key()), string(b.Key()))
}
want := append([]*Definition{}, definitions...)
slices.SortFunc(want, cmp)

@ -182,16 +182,16 @@ func doWithMachinePolicyLocked(t *testing.T, f func()) {
f()
}

func doWithCustomEnterLeaveFuncs(t *testing.T, f func(l *PolicyLock), enter func(bool) (policyLockHandle, error), leave func(policyLockHandle) error) {
func doWithCustomEnterLeaveFuncs(t *testing.T, f func(*PolicyLock), enter func(bool) (policyLockHandle, error), leave func(policyLockHandle) error) {
t.Helper()

l := NewMachinePolicyLock()
l.enterFn, l.leaveFn = enter, leave
lock := NewMachinePolicyLock()
lock.enterFn, lock.leaveFn = enter, leave
t.Cleanup(func() {
if err := l.Close(); err != nil {
if err := lock.Close(); err != nil {
t.Fatalf("(*PolicyLock).Close failed: %v", err)
}
})

f(l)
f(lock)
}

@ -127,32 +127,32 @@ func NewUserPolicyLock(token windows.Token) (*PolicyLock, error) {
return lock, nil
}

// Lock locks l.
// It returns [ErrInvalidLockState] if l has a zero value or has already been closed,
// Lock locks lk.
// It returns [ErrInvalidLockState] if lk has a zero value or has already been closed,
// [ErrLockRestricted] if the lock cannot be acquired due to a restriction in place,
// or a [syscall.Errno] if the underlying Group Policy lock cannot be acquired.
//
// As a special case, it fails with [windows.ERROR_ACCESS_DENIED]
// if l is a user policy lock, and the corresponding user is not logged in
// if lk is a user policy lock, and the corresponding user is not logged in
// interactively at the time of the call.
func (l *PolicyLock) Lock() error {
func (lk *PolicyLock) Lock() error {
if policyLockRestricted.Load() > 0 {
return ErrLockRestricted
}

l.mu.Lock()
defer l.mu.Unlock()
if l.lockCnt.Add(2)&1 == 0 {
lk.mu.Lock()
defer lk.mu.Unlock()
if lk.lockCnt.Add(2)&1 == 0 {
// The lock cannot be acquired because it has either never been properly
// created or its Close method has already been called. However, we need
// to call Unlock to both decrement lockCnt and leave the underlying
// CriticalPolicySection if we won the race with another goroutine and
// now own the lock.
l.Unlock()
lk.Unlock()
return ErrInvalidLockState
}

if l.handle != 0 {
if lk.handle != 0 {
// The underlying CriticalPolicySection is already acquired.
// It is an R-Lock (with the W-counterpart owned by the Group Policy service),
// meaning that it can be acquired by multiple readers simultaneously.
@ -160,20 +160,20 @@ func (l *PolicyLock) Lock() error {
return nil
}

return l.lockSlow()
return lk.lockSlow()
}

// lockSlow calls enterCriticalPolicySection to acquire the underlying GP read lock.
// It waits for either the lock to be acquired, or for the Close method to be called.
//
// l.mu must be held.
func (l *PolicyLock) lockSlow() (err error) {
func (lk *PolicyLock) lockSlow() (err error) {
defer func() {
if err != nil {
// Decrement the counter if the lock cannot be acquired,
// and complete the pending close request if we're the last owner.
if l.lockCnt.Add(-2) == 0 {
l.closeInternal()
if lk.lockCnt.Add(-2) == 0 {
lk.closeInternal()
}
}
}()
@ -190,12 +190,12 @@ func (l *PolicyLock) lockSlow() (err error) {
resultCh := make(chan policyLockResult)

go func() {
closing := l.closing
if l.scope == UserPolicy && l.token != 0 {
closing := lk.closing
if lk.scope == UserPolicy && lk.token != 0 {
// Impersonate the user whose critical policy section we want to acquire.
runtime.LockOSThread()
defer runtime.UnlockOSThread()
if err := impersonateLoggedOnUser(l.token); err != nil {
if err := impersonateLoggedOnUser(lk.token); err != nil {
initCh <- err
return
}
@ -209,10 +209,10 @@ func (l *PolicyLock) lockSlow() (err error) {
close(initCh)

var machine bool
if l.scope == MachinePolicy {
if lk.scope == MachinePolicy {
machine = true
}
handle, err := l.enterFn(machine)
handle, err := lk.enterFn(machine)

send_result:
for {
@ -226,7 +226,7 @@ func (l *PolicyLock) lockSlow() (err error) {
// The lock is being closed, and we lost the race to l.closing
// it the calling goroutine.
if err == nil {
l.leaveFn(handle)
lk.leaveFn(handle)
}
break send_result
default:
@ -247,21 +247,21 @@ func (l *PolicyLock) lockSlow() (err error) {
select {
case result := <-resultCh:
if result.err == nil {
l.handle = result.handle
lk.handle = result.handle
}
return result.err
case <-l.closing:
case <-lk.closing:
return ErrInvalidLockState
}
}

// Unlock unlocks l.
// It panics if l is not locked on entry to Unlock.
func (l *PolicyLock) Unlock() {
l.mu.Lock()
defer l.mu.Unlock()
func (lk *PolicyLock) Unlock() {
lk.mu.Lock()
defer lk.mu.Unlock()

lockCnt := l.lockCnt.Add(-2)
lockCnt := lk.lockCnt.Add(-2)
if lockCnt < 0 {
panic("negative lockCnt")
}
@ -273,33 +273,33 @@ func (l *PolicyLock) Unlock() {
return
}

if l.handle != 0 {
if lk.handle != 0 {
// Impersonation is not required to unlock a critical policy section.
// The handle we pass determines which mutex will be unlocked.
leaveCriticalPolicySection(l.handle)
l.handle = 0
leaveCriticalPolicySection(lk.handle)
lk.handle = 0
}

if lockCnt == 0 {
// Complete the pending close request if there's no more readers.
l.closeInternal()
lk.closeInternal()
}
}

// Close releases resources associated with l.
// It is a no-op for the machine policy lock.
func (l *PolicyLock) Close() error {
lockCnt := l.lockCnt.Load()
func (lk *PolicyLock) Close() error {
lockCnt := lk.lockCnt.Load()
if lockCnt&1 == 0 {
// The lock has never been initialized, or close has already been called.
return nil
}

close(l.closing)
close(lk.closing)

// Unset the LSB to indicate a pending close request.
for !l.lockCnt.CompareAndSwap(lockCnt, lockCnt&^int32(1)) {
lockCnt = l.lockCnt.Load()
for !lk.lockCnt.CompareAndSwap(lockCnt, lockCnt&^int32(1)) {
lockCnt = lk.lockCnt.Load()
}

if lockCnt != 0 {
@ -307,16 +307,16 @@ func (l *PolicyLock) Close() error {
return nil
}

return l.closeInternal()
return lk.closeInternal()
}

func (l *PolicyLock) closeInternal() error {
if l.token != 0 {
if err := l.token.Close(); err != nil {
func (lk *PolicyLock) closeInternal() error {
if lk.token != 0 {
if err := lk.token.Close(); err != nil {
return err
}
l.token = 0
lk.token = 0
}
l.closing = nil
lk.closing = nil
return nil
}

@ -256,8 +256,8 @@ func checkDomainAccount(username string) (sanitizedUserName string, isDomainAcco
// errors.Is to check for it. When capLevel == CapCreateProcess, the logon
// enforces the user's logon hours policy (when present).
func (ls *lsaSession) logonAs(srcName string, u *user.User, capLevel CapabilityLevel) (token windows.Token, err error) {
if l := len(srcName); l == 0 || l > _TOKEN_SOURCE_LENGTH {
return 0, fmt.Errorf("%w, actual length is %d", ErrBadSrcName, l)
if ln := len(srcName); ln == 0 || ln > _TOKEN_SOURCE_LENGTH {
return 0, fmt.Errorf("%w, actual length is %d", ErrBadSrcName, ln)
}
if err := checkASCII(srcName); err != nil {
return 0, fmt.Errorf("%w: %v", ErrBadSrcName, err)

@ -938,10 +938,10 @@ func mergeEnv(existingEnv []string, extraEnv map[string]string) []string {
result = append(result, strings.Join([]string{k, v}, "="))
}

slices.SortFunc(result, func(l, r string) int {
kl, _, _ := strings.Cut(l, "=")
kr, _, _ := strings.Cut(r, "=")
return strings.Compare(kl, kr)
slices.SortFunc(result, func(a, b string) int {
ka, _, _ := strings.Cut(a, "=")
kb, _, _ := strings.Cut(b, "=")
return strings.Compare(ka, kb)
})
return result
}

@ -83,8 +83,8 @@ func (sib *StartupInfoBuilder) Resolve() (startupInfo *windows.StartupInfo, inhe
// Always create a Unicode environment.
createProcessFlags = windows.CREATE_UNICODE_ENVIRONMENT

if l := uint32(len(sib.attrs)); l > 0 {
attrCont, err := windows.NewProcThreadAttributeList(l)
if ln := uint32(len(sib.attrs)); ln > 0 {
attrCont, err := windows.NewProcThreadAttributeList(ln)
if err != nil {
return nil, false, 0, err
}

@ -68,8 +68,8 @@ func checkContiguousBuffer[T any, BU BufUnit](t *testing.T, extra []BU, pt *T, p
if gotLen := int(ptLen); gotLen != expectedLen {
t.Errorf("allocation length got %d, want %d", gotLen, expectedLen)
}
if l := len(slcs); l != 1 {
t.Errorf("len(slcs) got %d, want 1", l)
if ln := len(slcs); ln != 1 {
t.Errorf("len(slcs) got %d, want 1", ln)
}
if len(extra) == 0 && slcs[0] != nil {
t.Error("slcs[0] got non-nil, want nil")

@ -66,8 +66,8 @@ func (p protocol) getLayers(d direction) []wf.LayerID {
return layers
}

func ruleName(action wf.Action, l wf.LayerID, name string) string {
switch l {
func ruleName(action wf.Action, layerID wf.LayerID, name string) string {
switch layerID {
case wf.LayerALEAuthConnectV4:
return fmt.Sprintf("%s outbound %s (IPv4)", action, name)
case wf.LayerALEAuthConnectV6:
@ -307,8 +307,8 @@ func (f *Firewall) newRule(name string, w weight, layer wf.LayerID, conditions [

func (f *Firewall) addRules(name string, w weight, conditions []*wf.Match, action wf.Action, p protocol, d direction) ([]*wf.Rule, error) {
var rules []*wf.Rule
for _, l := range p.getLayers(d) {
r, err := f.newRule(name, w, l, conditions, action)
for _, layer := range p.getLayers(d) {
r, err := f.newRule(name, w, layer, conditions, action)
if err != nil {
return nil, err
}

@ -111,7 +111,7 @@ func (c *Conn) WaitReady(t testing.TB) {
}
}

func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) {
func runDERPAndStun(t *testing.T, logf logger.Logf, ln nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) {
d := derpserver.New(key.NewNode(), logf)

httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d))
@ -119,7 +119,7 @@ func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, st
httpsrv.Config.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler))
httpsrv.StartTLS()

stunAddr, stunCleanup := stuntest.ServeWithPacketListener(t, l)
stunAddr, stunCleanup := stuntest.ServeWithPacketListener(t, ln)

m := &tailcfg.DERPMap{
Regions: map[int]*tailcfg.DERPRegion{
@ -172,12 +172,12 @@ type magicStack struct {
// newMagicStack builds and initializes an idle magicsock and
// friends. You need to call conn.onNodeViewsUpdate and dev.Reconfig
// before anything interesting happens.
func newMagicStack(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap) *magicStack {
func newMagicStack(t testing.TB, logf logger.Logf, ln nettype.PacketListener, derpMap *tailcfg.DERPMap) *magicStack {
privateKey := key.NewNode()
return newMagicStackWithKey(t, logf, l, derpMap, privateKey)
return newMagicStackWithKey(t, logf, ln, derpMap, privateKey)
}

func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap, privateKey key.NodePrivate) *magicStack {
func newMagicStackWithKey(t testing.TB, logf logger.Logf, ln nettype.PacketListener, derpMap *tailcfg.DERPMap, privateKey key.NodePrivate) *magicStack {
t.Helper()

bus := eventbustest.NewBus(t)
@ -197,7 +197,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen
Logf: logf,
HealthTracker: ht,
DisablePortMapper: true,
TestOnlyPacketListener: l,
TestOnlyPacketListener: ln,
EndpointsFunc: func(eps []tailcfg.Endpoint) {
epCh <- eps
},
@ -687,13 +687,13 @@ func (localhostListener) ListenPacket(ctx context.Context, network, address stri

func TestTwoDevicePing(t *testing.T) {
flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/11762")
l, ip := localhostListener{}, netaddr.IPv4(127, 0, 0, 1)
ln, ip := localhostListener{}, netaddr.IPv4(127, 0, 0, 1)
n := &devices{
m1: l,
m1: ln,
m1IP: ip,
m2: l,
m2: ln,
m2IP: ip,
stun: l,
stun: ln,
stunIP: ip,
}
testTwoDevicePing(t, n)

@ -126,24 +126,24 @@ func newLinkEndpoint(size int, mtu uint32, linkAddr tcpip.LinkAddress, supported
return le
}

// gro attempts to enqueue p on g if l supports a GRO kind matching the
// gro attempts to enqueue p on g if ep supports a GRO kind matching the
// transport protocol carried in p. gro may allocate g if it is nil. gro can
// either return the existing g, a newly allocated one, or nil. Callers are
// responsible for calling Flush() on the returned value if it is non-nil once
// they have finished iterating through all GRO candidates for a given vector.
// If gro allocates a *gro.GRO it will have l's stack.NetworkDispatcher set via
// If gro allocates a *gro.GRO it will have ep's stack.NetworkDispatcher set via
// SetDispatcher().
func (l *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO {
if !buildfeatures.HasGRO || l.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP {
func (ep *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO {
if !buildfeatures.HasGRO || ep.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP {
// IPv6 may have extension headers preceding a TCP header, but we trade
// for a fast path and assume p cannot be coalesced in such a case.
l.injectInbound(p)
ep.injectInbound(p)
return g
}
if g == nil {
l.mu.RLock()
d := l.dispatcher
l.mu.RUnlock()
ep.mu.RLock()
d := ep.dispatcher
ep.mu.RUnlock()
g = gro.NewGRO()
g.SetDispatcher(d)
}
@ -154,39 +154,39 @@ func (l *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO {
// Close closes l. Further packet injections will return an error, and all
// pending packets are discarded. Close may be called concurrently with
// WritePackets.
func (l *linkEndpoint) Close() {
l.mu.Lock()
l.dispatcher = nil
l.mu.Unlock()
l.q.Close()
l.Drain()
func (ep *linkEndpoint) Close() {
ep.mu.Lock()
ep.dispatcher = nil
ep.mu.Unlock()
ep.q.Close()
ep.Drain()
}

// Read does non-blocking read one packet from the outbound packet queue.
func (l *linkEndpoint) Read() *stack.PacketBuffer {
return l.q.Read()
func (ep *linkEndpoint) Read() *stack.PacketBuffer {
return ep.q.Read()
}

// ReadContext does blocking read for one packet from the outbound packet queue.
// It can be cancelled by ctx, and in this case, it returns nil.
func (l *linkEndpoint) ReadContext(ctx context.Context) *stack.PacketBuffer {
return l.q.ReadContext(ctx)
func (ep *linkEndpoint) ReadContext(ctx context.Context) *stack.PacketBuffer {
return ep.q.ReadContext(ctx)
}

// Drain removes all outbound packets from the channel and counts them.
func (l *linkEndpoint) Drain() int {
return l.q.Drain()
func (ep *linkEndpoint) Drain() int {
return ep.q.Drain()
}

// NumQueued returns the number of packets queued for outbound.
func (l *linkEndpoint) NumQueued() int {
return l.q.Num()
func (ep *linkEndpoint) NumQueued() int {
return ep.q.Num()
}

func (l *linkEndpoint) injectInbound(p *packet.Parsed) {
l.mu.RLock()
d := l.dispatcher
l.mu.RUnlock()
func (ep *linkEndpoint) injectInbound(p *packet.Parsed) {
ep.mu.RLock()
d := ep.dispatcher
ep.mu.RUnlock()
if d == nil || !buildfeatures.HasNetstack {
return
}
@ -200,35 +200,35 @@ func (l *linkEndpoint) injectInbound(p *packet.Parsed) {

// Attach saves the stack network-layer dispatcher for use later when packets
// are injected.
func (l *linkEndpoint) Attach(dispatcher stack.NetworkDispatcher) {
l.mu.Lock()
defer l.mu.Unlock()
l.dispatcher = dispatcher
func (ep *linkEndpoint) Attach(dispatcher stack.NetworkDispatcher) {
ep.mu.Lock()
defer ep.mu.Unlock()
ep.dispatcher = dispatcher
}

// IsAttached implements stack.LinkEndpoint.IsAttached.
func (l *linkEndpoint) IsAttached() bool {
l.mu.RLock()
defer l.mu.RUnlock()
return l.dispatcher != nil
func (ep *linkEndpoint) IsAttached() bool {
ep.mu.RLock()
defer ep.mu.RUnlock()
return ep.dispatcher != nil
}

// MTU implements stack.LinkEndpoint.MTU.
func (l *linkEndpoint) MTU() uint32 {
l.mu.RLock()
defer l.mu.RUnlock()
return l.mtu
func (ep *linkEndpoint) MTU() uint32 {
ep.mu.RLock()
defer ep.mu.RUnlock()
return ep.mtu
}

// SetMTU implements stack.LinkEndpoint.SetMTU.
func (l *linkEndpoint) SetMTU(mtu uint32) {
l.mu.Lock()
defer l.mu.Unlock()
l.mtu = mtu
func (ep *linkEndpoint) SetMTU(mtu uint32) {
ep.mu.Lock()
defer ep.mu.Unlock()
ep.mtu = mtu
}

// Capabilities implements stack.LinkEndpoint.Capabilities.
func (l *linkEndpoint) Capabilities() stack.LinkEndpointCapabilities {
func (ep *linkEndpoint) Capabilities() stack.LinkEndpointCapabilities {
// We are required to offload RX checksum validation for the purposes of
// GRO.
return stack.CapabilityRXChecksumOffload
@ -242,8 +242,8 @@ func (*linkEndpoint) GSOMaxSize() uint32 {
}

// SupportedGSO implements stack.GSOEndpoint.
func (l *linkEndpoint) SupportedGSO() stack.SupportedGSO {
return l.SupportedGSOKind
func (ep *linkEndpoint) SupportedGSO() stack.SupportedGSO {
return ep.SupportedGSOKind
}

// MaxHeaderLength returns the maximum size of the link layer header. Given it
@ -253,22 +253,22 @@ func (*linkEndpoint) MaxHeaderLength() uint16 {
}

// LinkAddress returns the link address of this endpoint.
func (l *linkEndpoint) LinkAddress() tcpip.LinkAddress {
l.mu.RLock()
defer l.mu.RUnlock()
return l.linkAddr
func (ep *linkEndpoint) LinkAddress() tcpip.LinkAddress {
ep.mu.RLock()
defer ep.mu.RUnlock()
return ep.linkAddr
}

// SetLinkAddress implements stack.LinkEndpoint.SetLinkAddress.
func (l *linkEndpoint) SetLinkAddress(addr tcpip.LinkAddress) {
l.mu.Lock()
defer l.mu.Unlock()
l.linkAddr = addr
func (ep *linkEndpoint) SetLinkAddress(addr tcpip.LinkAddress) {
ep.mu.Lock()
defer ep.mu.Unlock()
ep.linkAddr = addr
}

// WritePackets stores outbound packets into the channel.
// Multiple concurrent calls are permitted.
func (l *linkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) {
func (ep *linkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) {
n := 0
// TODO(jwhited): evaluate writing a stack.PacketBufferList instead of a
// single packet. We can split 2 x 64K GSO across
@ -278,7 +278,7 @@ func (l *linkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Err
// control MTU (and by effect TCP MSS in gVisor) we *shouldn't* expect to
// ever overflow 128 slots (see wireguard-go/tun.ErrTooManySegments usage).
for _, pkt := range pkts.AsSlice() {
if err := l.q.Write(pkt); err != nil {
if err := ep.q.Write(pkt); err != nil {
if _, ok := err.(*tcpip.ErrNoBufferSpace); !ok && n == 0 {
return 0, err
}

@ -870,7 +870,7 @@ func (o *fakeOS) run(args ...string) error {
rest = family + " " + strings.Join(args[3:], " ")
}

var l *[]string
var ls *[]string
switch args[1] {
case "link":
got := strings.Join(args[2:], " ")
@ -884,31 +884,31 @@ func (o *fakeOS) run(args ...string) error {
}
return nil
case "addr":
l = &o.ips
ls = &o.ips
case "route":
l = &o.routes
ls = &o.routes
case "rule":
l = &o.rules
ls = &o.rules
default:
return unexpected()
}

switch args[2] {
case "add":
for _, el := range *l {
for _, el := range *ls {
if el == rest {
o.t.Errorf("can't add %q, already present", rest)
return errors.New("already exists")
}
}
*l = append(*l, rest)
sort.Strings(*l)
*ls = append(*ls, rest)
sort.Strings(*ls)
case "del":
found := false
for i, el := range *l {
for i, el := range *ls {
if el == rest {
found = true
*l = append((*l)[:i], (*l)[i+1:]...)
*ls = append((*ls)[:i], (*ls)[i+1:]...)
break
}
}