mirror of https://github.com/tailscale/tailscale.git (synced 2025-10-03 19:41:34 +02:00)

commit 8fe575409f (parent: 4657cbdb11)

feature/featuretags: add build tag to remove captive portal detection

This doesn't yet fully pull it out into a feature/captiveportal package.
This is the usual first step, moving the code to its own files within the
same packages.

Updates #17254

Change-Id: Idfaec839debf7c96f51ca6520ce36ccf2f8eec92
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
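For context: a feature gated this way is stripped by passing the tag to the
Go toolchain at build time. A minimal example (the target package here is
assumed for illustration, not named in the diff):

    go build -tags ts_omit_captiveportal tailscale.com/cmd/tailscaled

With the tag set, the generated buildfeatures constant added below is false,
so every captive-portal code path is provably dead and the linker can drop
the feature's packages from the binary.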
@@ -103,7 +103,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
   tailscale.com/envknob from tailscale.com/client/local+
   tailscale.com/envknob/featureknob from tailscale.com/client/web
   tailscale.com/feature from tailscale.com/tsweb+
-  tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscale/cli
+  tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscale/cli+
   tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli
   tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli
   tailscale.com/feature/condregister/portmapper from tailscale.com/cmd/tailscale/cli
@@ -123,6 +123,19 @@ func TestOmitACME(t *testing.T) {
 	}.Check(t)
 }
 
+func TestOmitCaptivePortal(t *testing.T) {
+	deptest.DepChecker{
+		GOOS:   "linux",
+		GOARCH: "amd64",
+		Tags:   "ts_omit_captiveportal,ts_include_cli",
+		OnDep: func(dep string) {
+			if strings.Contains(dep, "captive") {
+				t.Errorf("unexpected dep with ts_omit_captiveportal: %q", dep)
+			}
+		},
+	}.Check(t)
+}
+
 func TestOmitOAuthKey(t *testing.T) {
 	deptest.DepChecker{
 		GOOS:   "linux",
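The DepChecker above builds the target for the given GOOS/GOARCH and tag set
and invokes OnDep for every package in the resulting import graph, failing
the test if anything captive-portal-related survives the omit tag. A rough
approximation with the standard toolchain (target package assumed; this is
not how deptest is actually implemented):

    GOOS=linux GOARCH=amd64 go list -deps \
        -tags "ts_omit_captiveportal,ts_include_cli" \
        tailscale.com/cmd/tailscaled | grep captive

Any line printed by grep here corresponds to a t.Errorf failure in the test.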
feature/buildfeatures/feature_captiveportal_disabled.go (new file, +13)
@@ -0,0 +1,13 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Code generated by gen.go; DO NOT EDIT.
+
+//go:build ts_omit_captiveportal
+
+package buildfeatures
+
+// HasCaptivePortal is whether the binary was built with support for modular feature "Captive portal detection".
+// Specifically, it's whether the binary was NOT built with the "ts_omit_captiveportal" build tag.
+// It's a const so it can be used for dead code elimination.
+const HasCaptivePortal = false
feature/buildfeatures/feature_captiveportal_enabled.go (new file, +13)
@@ -0,0 +1,13 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Code generated by gen.go; DO NOT EDIT.
+
+//go:build !ts_omit_captiveportal
+
+package buildfeatures
+
+// HasCaptivePortal is whether the binary was built with support for modular feature "Captive portal detection".
+// Specifically, it's whether the binary was NOT built with the "ts_omit_captiveportal" build tag.
+// It's a const so it can be used for dead code elimination.
+const HasCaptivePortal = true
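The reason HasCaptivePortal is a const rather than a var is spelled out in
the generated comment: a constant condition lets the compiler delete the
guarded branch outright. A self-contained sketch of the effect (names here
are stand-ins, not the real packages):

package main

import "fmt"

// Stand-in for buildfeatures.HasCaptivePortal; in the real tree the
// build tag selects which generated file defines it.
const hasCaptivePortal = false

func expensiveDetection() { fmt.Println("detecting captive portal") }

func main() {
	if hasCaptivePortal {
		// With a constant-false condition, the compiler discards this
		// call, and the linker can then drop expensiveDetection and its
		// transitive dependencies from the binary.
		expensiveDetection()
	}
	fmt.Println("done")
}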
@@ -93,6 +93,7 @@ var Features = map[FeatureTag]FeatureMeta{
 	"acme":          {"ACME", "ACME TLS certificate management", nil},
 	"aws":           {"AWS", "AWS integration", nil},
 	"bird":          {"Bird", "Bird BGP integration", nil},
+	"captiveportal": {"CaptivePortal", "Captive portal detection", nil},
 	"capture":       {"Capture", "Packet capture", nil},
 	"cli":           {"CLI", "embed the CLI into the tailscaled binary", nil},
 	"completion":    {"Completion", "CLI shell completion", nil},
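For orientation, each entry above maps a build-tag suffix to the metadata the
generator consumes. A hypothetical shape for these types, inferred from the
three-element literals in this hunk (field names are guesses, not the actual
featuretags definitions):

type FeatureTag string

// FeatureMeta describes one modular feature.
type FeatureMeta struct {
	Sym  string       // Go symbol used in generated code, e.g. "CaptivePortal" -> HasCaptivePortal
	Desc string       // human-readable description
	Deps []FeatureTag // features this one depends on; nil for all entries shown
}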
ipn/ipnlocal/captiveportal.go (new file, +186)
@@ -0,0 +1,186 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !ts_omit_captiveportal
+
+package ipnlocal
+
+import (
+	"context"
+	"time"
+
+	"tailscale.com/health"
+	"tailscale.com/net/captivedetection"
+	"tailscale.com/util/clientmetric"
+)
+
+func init() {
+	hookCaptivePortalHealthChange.Set(captivePortalHealthChange)
+	hookCheckCaptivePortalLoop.Set(checkCaptivePortalLoop)
+}
+
+var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detected")
+
+// captivePortalDetectionInterval is the duration to wait in an unhealthy state with connectivity broken
+// before running captive portal detection.
+const captivePortalDetectionInterval = 2 * time.Second
+
+func captivePortalHealthChange(b *LocalBackend, state *health.State) {
+	isConnectivityImpacted := false
+	for _, w := range state.Warnings {
+		// Ignore the captive portal warnable itself.
+		if w.ImpactsConnectivity && w.WarnableCode != captivePortalWarnable.Code {
+			isConnectivityImpacted = true
+			break
+		}
+	}
+
+	// captiveCtx can be changed, and is protected with 'mu'; grab that
+	// before we start our select, below.
+	//
+	// It is guaranteed to be non-nil.
+	b.mu.Lock()
+	ctx := b.captiveCtx
+	b.mu.Unlock()
+
+	// If the context is canceled, we don't need to do anything.
+	if ctx.Err() != nil {
+		return
+	}
+
+	if isConnectivityImpacted {
+		b.logf("health: connectivity impacted; triggering captive portal detection")
+
+		// Ensure that we select on captiveCtx so that we can time out
+		// triggering captive portal detection if the backend is shutdown.
+		select {
+		case b.needsCaptiveDetection <- true:
+		case <-ctx.Done():
+		}
+	} else {
+		// If connectivity is not impacted, we know for sure we're not behind a captive portal,
+		// so drop any warning, and signal that we don't need captive portal detection.
+		b.health.SetHealthy(captivePortalWarnable)
+		select {
+		case b.needsCaptiveDetection <- false:
+		case <-ctx.Done():
+		}
+	}
+}
+
+// captivePortalWarnable is a Warnable which is set to an unhealthy state when a captive portal is detected.
+var captivePortalWarnable = health.Register(&health.Warnable{
+	Code:  "captive-portal-detected",
+	Title: "Captive portal detected",
+	// High severity, because captive portals block all traffic and require user intervention.
+	Severity:            health.SeverityHigh,
+	Text:                health.StaticMessage("This network requires you to log in using your web browser."),
+	ImpactsConnectivity: true,
+})
+
+func checkCaptivePortalLoop(b *LocalBackend, ctx context.Context) {
+	var tmr *time.Timer
+
+	maybeStartTimer := func() {
+		// If there's an existing timer, nothing to do; just continue
+		// waiting for it to expire. Otherwise, create a new timer.
+		if tmr == nil {
+			tmr = time.NewTimer(captivePortalDetectionInterval)
+		}
+	}
+	maybeStopTimer := func() {
+		if tmr == nil {
+			return
+		}
+		if !tmr.Stop() {
+			<-tmr.C
+		}
+		tmr = nil
+	}
+
+	for {
+		if ctx.Err() != nil {
+			maybeStopTimer()
+			return
+		}
+
+		// First, see if we have a signal on our "healthy" channel, which
+		// takes priority over an existing timer. Because a select is
+		// nondeterministic, we explicitly check this channel before
+		// entering the main select below, so that we're guaranteed to
+		// stop the timer before starting captive portal detection.
+		select {
+		case needsCaptiveDetection := <-b.needsCaptiveDetection:
+			if needsCaptiveDetection {
+				maybeStartTimer()
+			} else {
+				maybeStopTimer()
+			}
+		default:
+		}
+
+		var timerChan <-chan time.Time
+		if tmr != nil {
+			timerChan = tmr.C
+		}
+		select {
+		case <-ctx.Done():
+			// All done; stop the timer and then exit.
+			maybeStopTimer()
+			return
+		case <-timerChan:
+			// Kick off captive portal check
+			b.performCaptiveDetection()
+			// nil out timer to force recreation
+			tmr = nil
+		case needsCaptiveDetection := <-b.needsCaptiveDetection:
+			if needsCaptiveDetection {
+				maybeStartTimer()
+			} else {
+				// Healthy; cancel any existing timer
+				maybeStopTimer()
+			}
+		}
+	}
+}
+
+// shouldRunCaptivePortalDetection reports whether captive portal detection
+// should be run. It is enabled by default, but can be disabled via a control
+// knob. It is also only run when the user explicitly wants the backend to be
+// running.
+func (b *LocalBackend) shouldRunCaptivePortalDetection() bool {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	return !b.ControlKnobs().DisableCaptivePortalDetection.Load() && b.pm.prefs.WantRunning()
+}
+
+// performCaptiveDetection checks if captive portal detection is enabled via controlknob. If so, it runs
+// the detection and updates the Warnable accordingly.
+func (b *LocalBackend) performCaptiveDetection() {
+	if !b.shouldRunCaptivePortalDetection() {
+		return
+	}
+
+	d := captivedetection.NewDetector(b.logf)
+	b.mu.Lock() // for b.hostinfo
+	cn := b.currentNode()
+	dm := cn.DERPMap()
+	preferredDERP := 0
+	if b.hostinfo != nil {
+		if b.hostinfo.NetInfo != nil {
+			preferredDERP = b.hostinfo.NetInfo.PreferredDERP
+		}
+	}
+	ctx := b.ctx
+	netMon := b.NetMon()
+	b.mu.Unlock()
+	found := d.Detect(ctx, netMon, dm, preferredDERP)
+	if found {
+		if !b.health.IsUnhealthy(captivePortalWarnable) {
+			metricCaptivePortalDetected.Add(1)
+		}
+		b.health.SetUnhealthy(captivePortalWarnable, health.Args{})
+	} else {
+		b.health.SetHealthy(captivePortalWarnable)
+	}
+}
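The init function above is the heart of the pattern: this file only compiles
when the feature is present, and its init registers the implementations into
hooks declared in the always-compiled part of the package. A minimal sketch
of what a Hook along the lines of tailscale.com/feature.Hook could look like
(illustrative, not the library's exact API):

package feature

// Hook is an optional function slot, set at most once during init by a
// feature implementation that was linked into the binary.
type Hook[Func any] struct {
	f     Func
	isSet bool
}

// Set registers f; registering twice is a programming error.
func (h *Hook[Func]) Set(f Func) {
	if h.isSet {
		panic("Hook.Set called twice")
	}
	h.f = f
	h.isSet = true
}

// Get returns the registered function, panicking if none was set, which
// is why callers guard it with a buildfeatures constant (or use GetOk).
func (h *Hook[Func]) Get() Func {
	if !h.isSet {
		panic("Hook.Get called without a prior Set")
	}
	return h.f
}

// GetOk returns the registered function and whether one was set.
func (h *Hook[Func]) GetOk() (f Func, ok bool) {
	return h.f, h.isSet
}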
@@ -64,7 +64,6 @@ import (
 	"tailscale.com/ipn/policy"
 	"tailscale.com/log/sockstatlog"
 	"tailscale.com/logpolicy"
-	"tailscale.com/net/captivedetection"
 	"tailscale.com/net/dns"
 	"tailscale.com/net/dnscache"
 	"tailscale.com/net/dnsfallback"
@@ -168,8 +167,6 @@ type watchSession struct {
 	cancel context.CancelFunc // to shut down the session
 }
 
-var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detected")
-
 var (
 	// errShutdown indicates that the [LocalBackend.Shutdown] was called.
 	errShutdown = errors.New("shutting down")
@@ -943,10 +940,6 @@ func (b *LocalBackend) DisconnectControl() {
 	cc.Shutdown()
 }
 
-// captivePortalDetectionInterval is the duration to wait in an unhealthy state with connectivity broken
-// before running captive portal detection.
-const captivePortalDetectionInterval = 2 * time.Second
-
 // linkChange is our network monitor callback, called whenever the network changes.
 func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) {
 	b.mu.Lock()
@@ -1002,6 +995,12 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) {
 	}
 }
 
+// Captive portal detection hooks.
+var (
+	hookCaptivePortalHealthChange feature.Hook[func(*LocalBackend, *health.State)]
+	hookCheckCaptivePortalLoop    feature.Hook[func(*LocalBackend, context.Context)]
+)
+
 func (b *LocalBackend) onHealthChange(change health.Change) {
 	if change.WarnableChanged {
 		w := change.Warnable
@@ -1019,45 +1018,8 @@ func (b *LocalBackend) onHealthChange(change health.Change) {
 		Health: state,
 	})
 
-	isConnectivityImpacted := false
-	for _, w := range state.Warnings {
-		// Ignore the captive portal warnable itself.
-		if w.ImpactsConnectivity && w.WarnableCode != captivePortalWarnable.Code {
-			isConnectivityImpacted = true
-			break
-		}
-	}
-
-	// captiveCtx can be changed, and is protected with 'mu'; grab that
-	// before we start our select, below.
-	//
-	// It is guaranteed to be non-nil.
-	b.mu.Lock()
-	ctx := b.captiveCtx
-	b.mu.Unlock()
-
-	// If the context is canceled, we don't need to do anything.
-	if ctx.Err() != nil {
-		return
-	}
-
-	if isConnectivityImpacted {
-		b.logf("health: connectivity impacted; triggering captive portal detection")
-
-		// Ensure that we select on captiveCtx so that we can time out
-		// triggering captive portal detection if the backend is shutdown.
-		select {
-		case b.needsCaptiveDetection <- true:
-		case <-ctx.Done():
-		}
-	} else {
-		// If connectivity is not impacted, we know for sure we're not behind a captive portal,
-		// so drop any warning, and signal that we don't need captive portal detection.
-		b.health.SetHealthy(captivePortalWarnable)
-		select {
-		case b.needsCaptiveDetection <- false:
-		case <-ctx.Done():
-		}
-	}
+	if f, ok := hookCaptivePortalHealthChange.GetOk(); ok {
+		f(b, state)
+	}
 }
 
@@ -1115,7 +1077,7 @@ func (b *LocalBackend) Shutdown() {
 	}
 	b.shutdownCalled = true
 
-	if b.captiveCancel != nil {
+	if buildfeatures.HasCaptivePortal && b.captiveCancel != nil {
 		b.logf("canceling captive portal context")
 		b.captiveCancel()
 	}
@@ -2767,123 +2729,6 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) {
 	}
 }
 
-// captivePortalWarnable is a Warnable which is set to an unhealthy state when a captive portal is detected.
-var captivePortalWarnable = health.Register(&health.Warnable{
-	Code:  "captive-portal-detected",
-	Title: "Captive portal detected",
-	// High severity, because captive portals block all traffic and require user intervention.
-	Severity:            health.SeverityHigh,
-	Text:                health.StaticMessage("This network requires you to log in using your web browser."),
-	ImpactsConnectivity: true,
-})
-
-func (b *LocalBackend) checkCaptivePortalLoop(ctx context.Context) {
-	var tmr *time.Timer
-
-	maybeStartTimer := func() {
-		// If there's an existing timer, nothing to do; just continue
-		// waiting for it to expire. Otherwise, create a new timer.
-		if tmr == nil {
-			tmr = time.NewTimer(captivePortalDetectionInterval)
-		}
-	}
-	maybeStopTimer := func() {
-		if tmr == nil {
-			return
-		}
-		if !tmr.Stop() {
-			<-tmr.C
-		}
-		tmr = nil
-	}
-
-	for {
-		if ctx.Err() != nil {
-			maybeStopTimer()
-			return
-		}
-
-		// First, see if we have a signal on our "healthy" channel, which
-		// takes priority over an existing timer. Because a select is
-		// nondeterministic, we explicitly check this channel before
-		// entering the main select below, so that we're guaranteed to
-		// stop the timer before starting captive portal detection.
-		select {
-		case needsCaptiveDetection := <-b.needsCaptiveDetection:
-			if needsCaptiveDetection {
-				maybeStartTimer()
-			} else {
-				maybeStopTimer()
-			}
-		default:
-		}
-
-		var timerChan <-chan time.Time
-		if tmr != nil {
-			timerChan = tmr.C
-		}
-		select {
-		case <-ctx.Done():
-			// All done; stop the timer and then exit.
-			maybeStopTimer()
-			return
-		case <-timerChan:
-			// Kick off captive portal check
-			b.performCaptiveDetection()
-			// nil out timer to force recreation
-			tmr = nil
-		case needsCaptiveDetection := <-b.needsCaptiveDetection:
-			if needsCaptiveDetection {
-				maybeStartTimer()
-			} else {
-				// Healthy; cancel any existing timer
-				maybeStopTimer()
-			}
-		}
-	}
-}
-
-// performCaptiveDetection checks if captive portal detection is enabled via controlknob. If so, it runs
-// the detection and updates the Warnable accordingly.
-func (b *LocalBackend) performCaptiveDetection() {
-	if !b.shouldRunCaptivePortalDetection() {
-		return
-	}
-
-	d := captivedetection.NewDetector(b.logf)
-	b.mu.Lock() // for b.hostinfo
-	cn := b.currentNode()
-	dm := cn.DERPMap()
-	preferredDERP := 0
-	if b.hostinfo != nil {
-		if b.hostinfo.NetInfo != nil {
-			preferredDERP = b.hostinfo.NetInfo.PreferredDERP
-		}
-	}
-	ctx := b.ctx
-	netMon := b.NetMon()
-	b.mu.Unlock()
-	found := d.Detect(ctx, netMon, dm, preferredDERP)
-	if found {
-		if !b.health.IsUnhealthy(captivePortalWarnable) {
-			metricCaptivePortalDetected.Add(1)
-		}
-		b.health.SetUnhealthy(captivePortalWarnable, health.Args{})
-	} else {
-		b.health.SetHealthy(captivePortalWarnable)
-	}
-}
-
-// shouldRunCaptivePortalDetection reports whether captive portal detection
-// should be run. It is enabled by default, but can be disabled via a control
-// knob. It is also only run when the user explicitly wants the backend to be
-// running.
-func (b *LocalBackend) shouldRunCaptivePortalDetection() bool {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	return !b.ControlKnobs().DisableCaptivePortalDetection.Load() && b.pm.prefs.WantRunning()
-}
-
 // packetFilterPermitsUnlockedNodes reports any peer in peers with the
 // UnsignedPeerAPIOnly bool set true has any of its allowed IPs in the packet
 // filter.
@@ -5715,16 +5560,18 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock
 		// Start a captive portal detection loop if none has been
 		// started. Create a new context if none is present, since it
 		// can be shut down if we transition away from Running.
-		if b.captiveCancel == nil {
-			b.captiveCtx, b.captiveCancel = context.WithCancel(b.ctx)
-			b.goTracker.Go(func() { b.checkCaptivePortalLoop(b.captiveCtx) })
+		if buildfeatures.HasCaptivePortal {
+			if b.captiveCancel == nil {
+				b.captiveCtx, b.captiveCancel = context.WithCancel(b.ctx)
+				b.goTracker.Go(func() { hookCheckCaptivePortalLoop.Get()(b, b.captiveCtx) })
+			}
 		}
 	} else if oldState == ipn.Running {
 		// Transitioning away from running.
 		b.closePeerAPIListenersLocked()
 
 		// Stop any existing captive portal detection loop.
-		if b.captiveCancel != nil {
+		if buildfeatures.HasCaptivePortal && b.captiveCancel != nil {
 			b.captiveCancel()
 			b.captiveCancel = nil
 
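Note the pairing that makes the bare Get() call above safe: the same
ts_omit_captiveportal tag controls both the HasCaptivePortal constant and
whether the new captiveportal.go (with its init registration) is compiled
in. A sketch of the invariant, as the guarded call with explanatory
comments added:

if buildfeatures.HasCaptivePortal {
	if b.captiveCancel == nil {
		b.captiveCtx, b.captiveCancel = context.WithCancel(b.ctx)
		// Get() would panic if the hook were unset, but whenever
		// HasCaptivePortal is true the tagged file's init has already
		// run and set hookCheckCaptivePortalLoop. When it is false,
		// this whole branch is compiled away and Get is never
		// referenced, which is what lets the linker prune the captive
		// portal code.
		b.goTracker.Go(func() { hookCheckCaptivePortalLoop.Get()(b, b.captiveCtx) })
	}
}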
net/netcheck/captiveportal.go (new file, +55)
@@ -0,0 +1,55 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !ts_omit_captiveportal
+
+package netcheck
+
+import (
+	"context"
+	"time"
+
+	"tailscale.com/net/captivedetection"
+	"tailscale.com/tailcfg"
+)
+
+func init() {
+	hookStartCaptivePortalDetection.Set(startCaptivePortalDetection)
+}
+
+func startCaptivePortalDetection(ctx context.Context, rs *reportState, dm *tailcfg.DERPMap, preferredDERP int) (done <-chan struct{}, stop func()) {
+	c := rs.c
+
+	// NOTE(andrew): we can't simply add this goroutine to the
+	// `NewWaitGroupChan` below, since we don't wait for that
+	// waitgroup to finish when exiting this function and thus get
+	// a data race.
+	ch := make(chan struct{})
+
+	tmr := time.AfterFunc(c.captivePortalDelay(), func() {
+		defer close(ch)
+		d := captivedetection.NewDetector(c.logf)
+		found := d.Detect(ctx, c.NetMon, dm, preferredDERP)
+		rs.report.CaptivePortal.Set(found)
+	})
+
+	stop = func() {
+		// Don't cancel our captive portal check if we're
+		// explicitly doing a verbose netcheck.
+		if c.Verbose {
+			return
+		}
+
+		if tmr.Stop() {
+			// Stopped successfully; need to close the
+			// signal channel ourselves.
+			close(ch)
+			return
+		}
+
+		// Did not stop; do nothing and it'll finish by itself
+		// and close the signal channel.
+	}
+
+	return ch, stop
+}
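The stop logic above relies on a subtlety of time.Timer: Stop reports
whether it prevented the scheduled function from firing, which determines
who closes the done channel. A self-contained sketch of the same pattern
(names here are illustrative):

package main

import (
	"fmt"
	"time"
)

// startDelayed runs job after delay and reports completion on done.
// Exactly one party closes done: the job itself if it ran, or stop if
// it canceled the timer before the job fired.
func startDelayed(delay time.Duration, job func()) (done <-chan struct{}, stop func()) {
	ch := make(chan struct{})
	tmr := time.AfterFunc(delay, func() {
		defer close(ch) // the job ran, so it owns closing the channel
		job()
	})
	stop = func() {
		if tmr.Stop() {
			// Timer canceled before firing; the job's deferred close
			// will never run, so close the channel here instead.
			close(ch)
		}
		// If Stop returned false, the job is running (or finished)
		// and will close the channel itself.
	}
	return ch, stop
}

func main() {
	done, stop := startDelayed(10*time.Millisecond, func() { fmt.Println("detection ran") })
	stop() // cancel before it fires; done is still closed exactly once
	<-done
	fmt.Println("no double close, no leaked goroutine")
}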
@@ -26,8 +26,9 @@ import (
 	"tailscale.com/derp"
 	"tailscale.com/derp/derphttp"
 	"tailscale.com/envknob"
+	"tailscale.com/feature"
+	"tailscale.com/feature/buildfeatures"
 	"tailscale.com/hostinfo"
-	"tailscale.com/net/captivedetection"
 	"tailscale.com/net/dnscache"
 	"tailscale.com/net/neterror"
 	"tailscale.com/net/netmon"
@@ -786,6 +787,8 @@ func (c *Client) SetForcePreferredDERP(region int) {
 	c.ForcePreferredDERP = region
 }
 
+var hookStartCaptivePortalDetection feature.Hook[func(ctx context.Context, rs *reportState, dm *tailcfg.DERPMap, preferredDERP int) (<-chan struct{}, func())]
+
 // GetReport gets a report. The 'opts' argument is optional and can be nil.
 // Callers are discouraged from passing a ctx with an arbitrary deadline as this
 // may cause GetReport to return prematurely before all reporting methods have
@@ -910,38 +913,9 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe
 	// it's unnecessary.
 	captivePortalDone := syncs.ClosedChan()
 	captivePortalStop := func() {}
-	if !rs.incremental && !onlySTUN {
-		// NOTE(andrew): we can't simply add this goroutine to the
-		// `NewWaitGroupChan` below, since we don't wait for that
-		// waitgroup to finish when exiting this function and thus get
-		// a data race.
-		ch := make(chan struct{})
-		captivePortalDone = ch
-
-		tmr := time.AfterFunc(c.captivePortalDelay(), func() {
-			defer close(ch)
-			d := captivedetection.NewDetector(c.logf)
-			found := d.Detect(ctx, c.NetMon, dm, preferredDERP)
-			rs.report.CaptivePortal.Set(found)
-		})
-
-		captivePortalStop = func() {
-			// Don't cancel our captive portal check if we're
-			// explicitly doing a verbose netcheck.
-			if c.Verbose {
-				return
-			}
-
-			if tmr.Stop() {
-				// Stopped successfully; need to close the
-				// signal channel ourselves.
-				close(ch)
-				return
-			}
-
-			// Did not stop; do nothing and it'll finish by itself
-			// and close the signal channel.
-		}
+	if buildfeatures.HasCaptivePortal && !rs.incremental && !onlySTUN {
+		start := hookStartCaptivePortalDetection.Get()
+		captivePortalDone, captivePortalStop = start(ctx, rs, dm, preferredDERP)
 	}
 
 	wg := syncs.NewWaitGroupChan()
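One detail worth noting in the context lines above: captivePortalDone
defaults to syncs.ClosedChan(), so the later receive on it never blocks
when detection was skipped or compiled out. A sketch of that default
(assuming ClosedChan simply returns a pre-closed channel):

package main

import "fmt"

// closedChan mimics what syncs.ClosedChan is assumed to do: return a
// channel that is already closed, so receiving from it never blocks.
func closedChan() <-chan struct{} {
	ch := make(chan struct{})
	close(ch)
	return ch
}

func main() {
	captivePortalDone := closedChan() // default: "already finished"
	captivePortalStop := func() {}    // default: nothing to cancel
	defer captivePortalStop()
	<-captivePortalDone // returns immediately when detection never started
	fmt.Println("report can proceed without a captive portal check")
}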
Loading…
x
Reference in New Issue
Block a user