policy/v2: wire PolicyManager through compiledGrant

Threads PolicyManager into compiledGrant so via grants resolve
viewer identity at compile time instead of re-resolving per MapRequest.
Adds a matchersForNodeMap cache invalidated on policy reload and on node
add/remove.

Updates #3157
This commit is contained in:
Kristoffer Dalby 2026-04-15 08:27:28 +00:00
parent b01e67e8e5
commit b051e7b2bc
7 changed files with 1065 additions and 831 deletions

View File

@ -0,0 +1,729 @@
package v2
import (
"net/netip"
"slices"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog/log"
"go4.org/netipx"
"tailscale.com/tailcfg"
"tailscale.com/types/views"
)
// grantCategory classifies a grant by what per-node work it needs.
// The category is assigned once at compile time (compileOneGrant) and
// read per-node in filterRulesForNode.
type grantCategory int

const (
	// grantCategoryRegular requires no per-node work. The pre-compiled
	// rules are complete and only need ReduceFilterRules.
	grantCategoryRegular grantCategory = iota
	// grantCategorySelf has autogroup:self destinations that must be
	// expanded per-node to same-user untagged device IPs.
	grantCategorySelf
	// grantCategoryVia has Via tags that route rules to specific
	// nodes based on their tags and advertised routes.
	grantCategoryVia
)
// compiledGrant is a grant with its sources already resolved to IP
// addresses. The expensive work (alias → IP resolution) is done once
// here. Extracting rules for a specific node reads from pre-resolved
// data without re-resolving.
type compiledGrant struct {
	// category selects the per-node work performed in
	// filterRulesForNode (regular, self, or via).
	category grantCategory
	// srcIPStrings is the final SrcIPs for non-self rules, with
	// nonWildcardSrcs appended to match Tailscale SaaS behavior.
	srcIPStrings []string
	// hasWildcard records the result of sourcesHaveWildcard for the
	// grant's sources.
	hasWildcard bool
	// hasDangerAll records the result of sourcesHaveDangerAll for
	// the grant's sources.
	hasDangerAll bool
	// rules are the pre-compiled filter rules for non-self, non-via
	// destinations. For regular grants this is the complete output.
	// For self grants with mixed destinations (self + other), this
	// is the non-self portion only.
	rules []tailcfg.FilterRule
	// self is non-nil when the grant has autogroup:self destinations.
	self *selfGrantData
	// via is non-nil when the grant has Via tags.
	via *viaGrantData
}
// selfGrantData holds data needed for per-node autogroup:self
// compilation. Sources are already resolved.
type selfGrantData struct {
	// resolvedSrcs are the per-alias resolved source addresses,
	// later intersected with same-user untagged device IPs in
	// compileAutogroupSelf.
	resolvedSrcs []ResolvedAddresses
	// internetProtocols are the grant's protocol/port entries used
	// to build per-node DstPorts rules.
	internetProtocols []ProtocolPort
	// app is the grant's capability map used to build per-node
	// CapGrant rules, if any.
	app tailcfg.PeerCapMap
}
// viaGrantData holds data needed for per-node via-grant compilation.
// Sources are already resolved into srcIPStrings.
type viaGrantData struct {
	// viaTags are the tags a node must carry to receive this
	// grant's rules.
	viaTags []Tag
	// destinations are the grant destinations, matched per-node
	// against the node's advertised subnet routes.
	destinations Aliases
	// internetProtocols are the grant's protocol/port entries used
	// to build the per-node DstPorts rules.
	internetProtocols []ProtocolPort
	// srcIPStrings is the merged, pre-resolved SrcIPs list.
	srcIPStrings []string
}
// userNodeIndex maps user IDs to their untagged nodes. Built once per
// policy or node-set change and read from many goroutines under
// PolicyManager.mu; readers must hold the lock (or the snapshot
// returned to them). Construct via buildUserNodeIndex.
type userNodeIndex map[uint][]types.NodeView
// buildUserNodeIndex groups untagged nodes by the ID of their owning
// user. Tagged nodes and nodes without a valid user are skipped.
func buildUserNodeIndex(
	nodes views.Slice[types.NodeView],
) userNodeIndex {
	byUser := make(userNodeIndex)
	for _, node := range nodes.All() {
		// Tagged devices have tag-based identity, not
		// user-based, so they never enter the index.
		if node.IsTagged() || !node.User().Valid() {
			continue
		}
		uid := node.User().ID()
		byUser[uid] = append(byUser[uid], node)
	}
	return byUser
}
// compileGrants resolves all policy grants into compiledGrant structs.
// Source resolution and non-self destination resolution happens once
// here. This is the single resolution path that replaces the
// duplicated work in compileFilterRules and compileGrantWithAutogroupSelf.
func (pol *Policy) compileGrants(
	users types.Users,
	nodes views.Slice[types.NodeView],
) []compiledGrant {
	if pol == nil {
		return nil
	}
	if pol.ACLs == nil && len(pol.Grants) == 0 {
		return nil
	}
	// Legacy ACLs are converted and compiled through the same path
	// as native grants.
	all := pol.Grants
	for _, acl := range pol.ACLs {
		all = append(all, aclToGrants(acl)...)
	}
	out := make([]compiledGrant, 0, len(all))
	for _, g := range all {
		compiledOne, err := pol.compileOneGrant(g, users, nodes)
		if err != nil {
			// A bad grant is skipped, not fatal: the rest
			// of the policy still compiles.
			log.Trace().Err(err).Msg("compiling grant")
			continue
		}
		if compiledOne == nil {
			continue
		}
		out = append(out, *compiledOne)
	}
	return out
}
// compileOneGrant resolves a single grant into a compiledGrant.
// All source resolution happens here. Non-self, non-via destination
// resolution also happens here. Per-node data (self dests, via
// matching) is stored for deferred compilation.
//
// Returns (nil, nil) when the grant can never produce rules (empty
// src/dst lists, or sources that resolved to nothing and no App
// grant); callers must tolerate a nil result without an error.
//
//nolint:gocyclo,cyclop
func (pol *Policy) compileOneGrant(
	grant Grant,
	users types.Users,
	nodes views.Slice[types.NodeView],
) (*compiledGrant, error) {
	// Via grants: resolve sources, store deferred data.
	if len(grant.Via) > 0 {
		return pol.compileOneViaGrant(grant, users, nodes)
	}
	// Split destinations into self vs other.
	var autogroupSelfDests, otherDests []Alias
	for _, dest := range grant.Destinations {
		if ag, ok := dest.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
			autogroupSelfDests = append(autogroupSelfDests, dest)
		} else {
			otherDests = append(otherDests, dest)
		}
	}
	// Resolve sources per-alias, tracking non-wildcard sources
	// separately so we can preserve their IPs alongside the
	// wildcard CGNAT ranges (matching Tailscale SaaS behavior).
	// This runs before the empty-src/dst check below so that
	// errSelfInSources still surfaces for malformed grants.
	resolvedSrcs, nonWildcardSrcs, err := resolveSources(
		pol, grant.Sources, users, nodes,
	)
	if err != nil {
		return nil, err
	}
	// Literally empty src=[] or dst=[] produces no rules.
	if len(grant.Sources) == 0 || len(grant.Destinations) == 0 {
		return nil, nil //nolint:nilnil
	}
	// Sources that resolved to nothing (e.g. an empty group) still
	// matter when App is set: App grants emit CapGrant rules even
	// with empty SrcIPs (see compileOtherDests).
	if len(resolvedSrcs) == 0 && grant.App == nil {
		return nil, nil //nolint:nilnil
	}
	hasWildcard := sourcesHaveWildcard(grant.Sources)
	hasDangerAll := sourcesHaveDangerAll(grant.Sources)
	srcIPStrings := buildSrcIPStrings(
		resolvedSrcs, nonWildcardSrcs,
		hasWildcard, hasDangerAll, nodes,
	)
	cg := &compiledGrant{
		srcIPStrings: srcIPStrings,
		hasWildcard:  hasWildcard,
		hasDangerAll: hasDangerAll,
	}
	// Compile non-self destination rules (done once, shared).
	if len(otherDests) > 0 {
		cg.rules = pol.compileOtherDests(
			users, nodes, grant, otherDests,
			resolvedSrcs, srcIPStrings,
		)
	}
	// Classify and store deferred self data.
	switch {
	case len(autogroupSelfDests) > 0:
		cg.category = grantCategorySelf
		cg.self = &selfGrantData{
			resolvedSrcs:      resolvedSrcs,
			internetProtocols: grant.InternetProtocols,
			app:               grant.App,
		}
	default:
		cg.category = grantCategoryRegular
	}
	return cg, nil
}
// compileOneViaGrant resolves sources for a via grant and stores the
// deferred per-node data. The actual via-node matching and route
// intersection happens in compileViaForNode.
func (pol *Policy) compileOneViaGrant(
	grant Grant,
	users types.Users,
	nodes views.Slice[types.NodeView],
) (*compiledGrant, error) {
	// App-only via grants (no ip field) never produce packet
	// filter rules, so there is nothing to compile.
	if len(grant.InternetProtocols) == 0 {
		return nil, nil //nolint:nilnil
	}
	sources, _, err := resolveSources(
		pol, grant.Sources, users, nodes,
	)
	if err != nil {
		return nil, err
	}
	if len(sources) == 0 {
		return nil, nil //nolint:nilnil
	}
	// Merge every resolved source alias into one IP set.
	var builder netipx.IPSetBuilder
	for _, addrs := range sources {
		for _, p := range addrs.Prefixes() {
			builder.AddPrefix(p)
		}
	}
	merged, err := newResolved(&builder)
	if err != nil {
		return nil, err
	}
	if merged.Empty() {
		return nil, nil //nolint:nilnil
	}
	wildcard := sourcesHaveWildcard(grant.Sources)
	dangerAll := sourcesHaveDangerAll(grant.Sources)
	return &compiledGrant{
		category: grantCategoryVia,
		via: &viaGrantData{
			viaTags:           grant.Via,
			destinations:      grant.Destinations,
			internetProtocols: grant.InternetProtocols,
			srcIPStrings: srcIPsWithRoutes(
				merged, wildcard, dangerAll, nodes,
			),
		},
	}, nil
}
// resolveSources resolves grant sources per-alias, returning the
// resolved addresses and a separate slice of non-wildcard sources.
// This is the canonical source-resolution path. Its output lands in
// compiledGrant.srcIPStrings (among other places) and callers on the
// hot path should prefer reading that over calling Resolve again.
func resolveSources(
	pol *Policy,
	sources Aliases,
	users types.Users,
	nodes views.Slice[types.NodeView],
) ([]ResolvedAddresses, []ResolvedAddresses, error) {
	var resolved, explicit []ResolvedAddresses
	for _, src := range sources {
		// autogroup:self is only valid as a destination.
		if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
			return nil, nil, errSelfInSources
		}
		addrs, err := src.Resolve(pol, users, nodes)
		if err != nil {
			// Resolution errors are logged but not fatal;
			// the alias simply contributes no addresses.
			log.Trace().Caller().Err(err).
				Msg("resolving source ips")
		}
		if addrs == nil {
			continue
		}
		resolved = append(resolved, addrs)
		if _, wildcard := src.(Asterix); !wildcard {
			explicit = append(explicit, addrs)
		}
	}
	return resolved, explicit, nil
}
// buildSrcIPStrings builds the final SrcIPs string slice from
// resolved sources, preserving non-wildcard IPs alongside wildcard
// CGNAT ranges to match Tailscale SaaS behavior. Returns nil when
// the merged set is empty or cannot be built.
func buildSrcIPStrings(
	resolvedSrcs, nonWildcardSrcs []ResolvedAddresses,
	hasWildcard, hasDangerAll bool,
	nodes views.Slice[types.NodeView],
) []string {
	// Merge all resolved aliases into a single set.
	var builder netipx.IPSetBuilder
	for _, addrs := range resolvedSrcs {
		for _, p := range addrs.Prefixes() {
			builder.AddPrefix(p)
		}
	}
	resolved, err := newResolved(&builder)
	if err != nil || resolved.Empty() {
		return nil
	}
	out := srcIPsWithRoutes(
		resolved, hasWildcard, hasDangerAll, nodes,
	)
	if !hasWildcard || len(nonWildcardSrcs) == 0 {
		return out
	}
	// When sources include a wildcard (*) alongside explicit
	// sources (tags, groups, etc.), Tailscale preserves the
	// individual IPs from non-wildcard sources alongside the
	// merged CGNAT ranges rather than absorbing them.
	present := make(map[string]bool, len(out))
	for _, s := range out {
		present[s] = true
	}
	for _, addrs := range nonWildcardSrcs {
		for _, s := range addrs.Strings() {
			if present[s] {
				continue
			}
			present[s] = true
			out = append(out, s)
		}
	}
	return out
}
// compileOtherDests compiles filter rules for non-self, non-via
// destinations. This produces both DstPorts rules (from
// InternetProtocols) and CapGrant rules (from App).
//
// srcIPStrings must already be the merged source list (see
// buildSrcIPStrings); resolvedSrcs is only used to derive the source
// prefixes for the reversed companion CapGrant rules.
func (pol *Policy) compileOtherDests(
	users types.Users,
	nodes views.Slice[types.NodeView],
	grant Grant,
	otherDests Aliases,
	resolvedSrcs []ResolvedAddresses,
	srcIPStrings []string,
) []tailcfg.FilterRule {
	var rules []tailcfg.FilterRule
	// DstPorts rules from InternetProtocols.
	for _, ipp := range grant.InternetProtocols {
		destPorts := pol.destinationsToNetPortRange(
			users, nodes, otherDests, ipp.Ports,
		)
		if len(destPorts) > 0 && len(srcIPStrings) > 0 {
			rules = append(rules, tailcfg.FilterRule{
				SrcIPs:   srcIPStrings,
				DstPorts: destPorts,
				IPProto:  ipp.Protocol.toIANAProtocolNumbers(),
			})
		}
	}
	// CapGrant rules from App.
	if grant.App != nil {
		capSrcIPStrs := srcIPStrings
		// When sources resolved to empty but App is set,
		// Tailscale still produces the CapGrant rule with
		// empty SrcIPs.
		if capSrcIPStrs == nil {
			capSrcIPStrs = []string{}
		}
		var (
			capGrants    []tailcfg.CapGrant
			dstIPStrings []string
		)
		for _, dst := range otherDests {
			ips, err := dst.Resolve(pol, users, nodes)
			if err != nil {
				// Unresolvable destinations are skipped
				// rather than failing the whole grant.
				continue
			}
			capGrants = append(capGrants, tailcfg.CapGrant{
				Dsts:   ips.Prefixes(),
				CapMap: grant.App,
			})
			dstIPStrings = append(dstIPStrings, ips.Strings()...)
		}
		if len(capGrants) > 0 {
			srcPrefixes := make([]netip.Prefix, 0, len(resolvedSrcs)*2)
			for _, ips := range resolvedSrcs {
				srcPrefixes = append(
					srcPrefixes, ips.Prefixes()...,
				)
			}
			rules = append(rules, tailcfg.FilterRule{
				SrcIPs:   capSrcIPStrs,
				CapGrant: capGrants,
			})
			// Companion rules use reversed direction: their
			// SrcIPs are the destination IPs. When destinations
			// include a wildcard, approved subnet routes are
			// added to the companion SrcIPs as well.
			dstsHaveWildcard := sourcesHaveWildcard(otherDests)
			if dstsHaveWildcard {
				dstIPStrings = append(
					dstIPStrings,
					approvedSubnetRoutes(nodes)...,
				)
			}
			rules = append(
				rules,
				companionCapGrantRules(
					dstIPStrings, srcPrefixes, grant.App,
				)...,
			)
		}
	}
	return rules
}
// hasPerNodeGrants reports whether any compiled grant requires
// per-node filter compilation (via grants or autogroup:self).
func hasPerNodeGrants(grants []compiledGrant) bool {
	return slices.ContainsFunc(grants, func(cg compiledGrant) bool {
		return cg.category != grantCategoryRegular
	})
}
// globalFilterRules extracts global filter rules from compiled
// grants. Via grants produce no global rules (they are per-node
// only); regular grants contribute their full pre-compiled ruleset;
// self grants contribute their non-self portion.
func globalFilterRules(grants []compiledGrant) []tailcfg.FilterRule {
	var out []tailcfg.FilterRule
	for _, cg := range grants {
		switch cg.category {
		case grantCategoryVia:
			// Per-node only; nothing global to emit.
		default:
			out = append(out, cg.rules...)
		}
	}
	return mergeFilterRules(out)
}
// filterRulesForNode produces unreduced filter rules for a specific
// node by combining pre-compiled global rules with per-node self and
// via rules. Regular grants emit their pre-compiled rules as-is.
// Self grants add autogroup:self expansion. Via grants add
// tag-matched, route-intersected rules.
func filterRulesForNode(
	grants []compiledGrant,
	node types.NodeView,
	userIdx userNodeIndex,
) []tailcfg.FilterRule {
	var out []tailcfg.FilterRule
	for i := range grants {
		cg := &grants[i]
		// Every category contributes its pre-compiled rules
		// first (empty for via-only grants).
		out = append(out, cg.rules...)
		if cg.category == grantCategorySelf {
			out = append(
				out, compileAutogroupSelf(cg, node, userIdx)...,
			)
		} else if cg.category == grantCategoryVia {
			out = append(
				out, compileViaForNode(cg, node)...,
			)
		}
	}
	return mergeFilterRules(out)
}
// compileAutogroupSelf produces filter rules for autogroup:self
// destinations for a specific node. Only called for grants with
// self destinations and only produces rules for untagged nodes
// (tagged nodes have tag-based, not user-based, identity).
func compileAutogroupSelf(
	cg *compiledGrant,
	node types.NodeView,
	userIdx userNodeIndex,
) []tailcfg.FilterRule {
	if node.IsTagged() || cg.self == nil {
		return nil
	}
	if !node.User().Valid() {
		return nil
	}
	// userIdx only contains untagged nodes, so this lookup is
	// already restricted to same-user untagged devices.
	sameUserNodes := userIdx[node.User().ID()]
	if len(sameUserNodes) == 0 {
		return nil
	}
	var rules []tailcfg.FilterRule
	// Filter sources to only same-user untagged devices.
	srcResolved := filterSourcesToSameUser(
		cg.self.resolvedSrcs, sameUserNodes,
	)
	if srcResolved == nil || srcResolved.Empty() {
		return nil
	}
	// DstPorts rules from InternetProtocols: every IP of every
	// same-user device becomes a destination for each port entry.
	for _, ipp := range cg.self.internetProtocols {
		var destPorts []tailcfg.NetPortRange
		for _, n := range sameUserNodes {
			for _, port := range ipp.Ports {
				for _, ip := range n.IPs() {
					destPorts = append(
						destPorts,
						tailcfg.NetPortRange{
							IP:    ip.String(),
							Ports: port,
						},
					)
				}
			}
		}
		if len(destPorts) > 0 {
			rules = append(rules, tailcfg.FilterRule{
				SrcIPs:   srcResolved.Strings(),
				DstPorts: destPorts,
				IPProto:  ipp.Protocol.toIANAProtocolNumbers(),
			})
		}
	}
	// CapGrant rules from App: single-address prefixes for each
	// same-user device IP, plus reversed companion rules.
	if cg.self.app != nil {
		var (
			capGrants    []tailcfg.CapGrant
			dstIPStrings []string
		)
		for _, n := range sameUserNodes {
			var dsts []netip.Prefix
			for _, ip := range n.IPs() {
				dsts = append(
					dsts,
					netip.PrefixFrom(ip, ip.BitLen()),
				)
				dstIPStrings = append(
					dstIPStrings, ip.String(),
				)
			}
			capGrants = append(capGrants, tailcfg.CapGrant{
				Dsts:   dsts,
				CapMap: cg.self.app,
			})
		}
		if len(capGrants) > 0 {
			rules = append(rules, tailcfg.FilterRule{
				SrcIPs:   srcResolved.Strings(),
				CapGrant: capGrants,
			})
			rules = append(
				rules,
				companionCapGrantRules(
					dstIPStrings,
					srcResolved.Prefixes(),
					cg.self.app,
				)...,
			)
		}
	}
	return rules
}
// filterSourcesToSameUser intersects resolved source addresses with
// same-user untagged device IPs, returning only the addresses that
// belong to those devices. Returns nil when the set cannot be built.
func filterSourcesToSameUser(
	resolvedSrcs []ResolvedAddresses,
	sameUserNodes []types.NodeView,
) ResolvedAddresses {
	var builder netipx.IPSetBuilder
	for _, addrs := range resolvedSrcs {
		for _, peer := range sameUserNodes {
			// A device contributes all of its IPs when any
			// one of them appears in this source set.
			if slices.ContainsFunc(peer.IPs(), addrs.Contains) {
				peer.AppendToIPSet(&builder)
			}
		}
	}
	out, err := newResolved(&builder)
	if err != nil {
		return nil
	}
	return out
}
// compileViaForNode produces via-grant filter rules for a specific
// node. Only produces rules when the node matches one of the via
// tags and advertises routes that match the grant destinations.
func compileViaForNode(
	cg *compiledGrant,
	node types.NodeView,
) []tailcfg.FilterRule {
	if cg.via == nil {
		return nil
	}
	// Check if node matches any via tag.
	matchesVia := false
	for _, viaTag := range cg.via.viaTags {
		if node.HasTag(string(viaTag)) {
			matchesVia = true
			break
		}
	}
	if !matchesVia {
		return nil
	}
	// Find matching destination prefixes. Only subnet routes are
	// consulted here; exit routes are irrelevant because the
	// AutoGroup case below intentionally emits nothing.
	nodeSubnetRoutes := node.SubnetRoutes()
	if len(nodeSubnetRoutes) == 0 {
		return nil
	}
	var viaDstPrefixes []netip.Prefix
	for _, dst := range cg.via.destinations {
		switch d := dst.(type) {
		case *Prefix:
			// Exact match against the node's advertised
			// routes; a covering supernet does not count.
			dstPrefix := netip.Prefix(*d)
			if slices.Contains(nodeSubnetRoutes, dstPrefix) {
				viaDstPrefixes = append(
					viaDstPrefixes, dstPrefix,
				)
			}
		case *AutoGroup:
			// autogroup:internet via grants do not produce
			// PacketFilter rules on exit nodes.
		}
	}
	if len(viaDstPrefixes) == 0 {
		return nil
	}
	// Build rules using pre-resolved srcIPStrings.
	var rules []tailcfg.FilterRule
	for _, ipp := range cg.via.internetProtocols {
		var destPorts []tailcfg.NetPortRange
		for _, prefix := range viaDstPrefixes {
			for _, port := range ipp.Ports {
				destPorts = append(
					destPorts,
					tailcfg.NetPortRange{
						IP:    prefix.String(),
						Ports: port,
					},
				)
			}
		}
		if len(destPorts) > 0 {
			rules = append(rules, tailcfg.FilterRule{
				SrcIPs:   cg.via.srcIPStrings,
				DstPorts: destPorts,
				IPProto:  ipp.Protocol.toIANAProtocolNumbers(),
			})
		}
	}
	return rules
}

View File

@ -1,6 +1,7 @@
package v2
import (
"cmp"
"errors"
"fmt"
"net/netip"
@ -62,7 +63,7 @@ func companionCapGrantRules(
}
slices.SortFunc(pairs, func(a, b pair) int {
return strings.Compare(string(a.original), string(b.original))
return cmp.Compare(a.original, b.original)
})
companions := make([]tailcfg.FilterRule, 0, len(pairs))
@ -141,88 +142,7 @@ func (pol *Policy) compileFilterRules(
return tailcfg.FilterAllowAll, nil
}
var rules []tailcfg.FilterRule
grants := pol.Grants
for _, acl := range pol.ACLs {
grants = append(grants, aclToGrants(acl)...)
}
for _, grant := range grants {
// Via grants are compiled per-node in compileViaGrant,
// not in the global filter set.
if len(grant.Via) > 0 {
continue
}
srcIPs, err := grant.Sources.Resolve(pol, users, nodes)
if err != nil {
log.Trace().Caller().Err(err).Msgf("resolving source ips")
}
if srcIPs.Empty() {
continue
}
hasWildcard := sourcesHaveWildcard(grant.Sources)
hasDangerAll := sourcesHaveDangerAll(grant.Sources)
for _, ipp := range grant.InternetProtocols {
destPorts := pol.destinationsToNetPortRange(users, nodes, grant.Destinations, ipp.Ports)
if len(destPorts) > 0 {
rules = append(rules, tailcfg.FilterRule{
SrcIPs: srcIPsWithRoutes(srcIPs, hasWildcard, hasDangerAll, nodes),
DstPorts: destPorts,
IPProto: ipp.Protocol.toIANAProtocolNumbers(),
})
}
}
if grant.App != nil {
var (
capGrants []tailcfg.CapGrant
dstIPStrings []string
)
for _, dst := range grant.Destinations {
ips, err := dst.Resolve(pol, users, nodes)
if err != nil {
continue
}
dstPrefixes := ips.Prefixes()
capGrants = append(capGrants, tailcfg.CapGrant{
Dsts: dstPrefixes,
CapMap: grant.App,
})
dstIPStrings = append(dstIPStrings, ips.Strings()...)
}
srcIPStrs := srcIPsWithRoutes(srcIPs, hasWildcard, hasDangerAll, nodes)
rules = append(rules, tailcfg.FilterRule{
SrcIPs: srcIPStrs,
CapGrant: capGrants,
})
// Companion rules use reversed direction: SrcIPs are
// destination IPs and CapGrant Dsts are source IPs.
// When destinations include a wildcard, add subnet
// routes to companion SrcIPs (same as main rule).
dstsHaveWildcard := sourcesHaveWildcard(grant.Destinations)
if dstsHaveWildcard {
dstIPStrings = append(dstIPStrings, approvedSubnetRoutes(nodes)...)
}
rules = append(
rules,
companionCapGrantRules(dstIPStrings, srcIPs.Prefixes(), grant.App)...,
)
}
}
return mergeFilterRules(rules), nil
return globalFilterRules(pol.compileGrants(users, nodes)), nil
}
func (pol *Policy) destinationsToNetPortRange(
@ -293,509 +213,10 @@ func (pol *Policy) compileFilterRulesForNode(
return tailcfg.FilterAllowAll, nil
}
var rules []tailcfg.FilterRule
grants := pol.compileGrants(users, nodes)
userIdx := buildUserNodeIndex(nodes)
grants := pol.Grants
for _, acl := range pol.ACLs {
grants = append(grants, aclToGrants(acl)...)
}
for _, grant := range grants {
res, err := pol.compileGrantWithAutogroupSelf(grant, users, node, nodes)
if err != nil {
log.Trace().Err(err).Msgf("compiling ACL")
continue
}
rules = append(rules, res...)
}
return mergeFilterRules(rules), nil
}
// compileViaGrant compiles a grant with a "via" field. Via grants
// produce filter rules ONLY on nodes matching a via tag that actually
// advertise (and have approved) the destination subnets. All other
// nodes receive no rules. App-only via grants (no ip field) produce
// no packet filter rules.
func (pol *Policy) compileViaGrant(
	grant Grant,
	users types.Users,
	node types.NodeView,
	nodes views.Slice[types.NodeView],
) ([]tailcfg.FilterRule, error) {
	// Check if the current node matches any of the via tags.
	matchesVia := false
	for _, viaTag := range grant.Via {
		if node.HasTag(string(viaTag)) {
			matchesVia = true
			break
		}
	}
	if !matchesVia {
		return nil, nil
	}
	// App-only via grants produce no packet filter rules.
	if len(grant.InternetProtocols) == 0 {
		return nil, nil
	}
	// Find which grant destination subnets/exit routes this node actually advertises.
	nodeSubnetRoutes := node.SubnetRoutes()
	nodeExitRoutes := node.ExitRoutes()
	if len(nodeSubnetRoutes) == 0 && len(nodeExitRoutes) == 0 {
		return nil, nil
	}
	// Collect destination prefixes that match the node's approved routes.
	var viaDstPrefixes []netip.Prefix
	for _, dst := range grant.Destinations {
		switch d := dst.(type) {
		case *Prefix:
			dstPrefix := netip.Prefix(*d)
			if slices.Contains(nodeSubnetRoutes, dstPrefix) {
				viaDstPrefixes = append(viaDstPrefixes, dstPrefix)
			}
		case *AutoGroup:
			// autogroup:internet via grants do NOT produce PacketFilter rules
			// on the exit node. Tailscale SaaS handles exit traffic forwarding
			// through the client's exit node selection mechanism (AllowedIPs +
			// ExitNodeOption), not through PacketFilter rules. Verified by
			// golden captures GRANT-V14 through GRANT-V36.
		}
	}
	if len(viaDstPrefixes) == 0 {
		return nil, nil
	}
	// Resolve source IPs. autogroup:self is never valid as a source.
	var resolvedSrcs []ResolvedAddresses
	for _, src := range grant.Sources {
		if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
			return nil, errSelfInSources
		}
		ips, err := src.Resolve(pol, users, nodes)
		if err != nil {
			// Resolution errors are logged but not fatal; the
			// alias simply contributes no addresses.
			log.Trace().Caller().Err(err).Msgf("resolving source ips")
		}
		if ips != nil {
			resolvedSrcs = append(resolvedSrcs, ips)
		}
	}
	if len(resolvedSrcs) == 0 {
		return nil, nil
	}
	// Build merged SrcIPs from all sources.
	var srcIPs netipx.IPSetBuilder
	for _, ips := range resolvedSrcs {
		for _, pref := range ips.Prefixes() {
			srcIPs.AddPrefix(pref)
		}
	}
	srcResolved, err := newResolved(&srcIPs)
	if err != nil {
		return nil, err
	}
	if srcResolved.Empty() {
		return nil, nil
	}
	hasWildcard := sourcesHaveWildcard(grant.Sources)
	hasDangerAll := sourcesHaveDangerAll(grant.Sources)
	srcIPStrs := srcIPsWithRoutes(srcResolved, hasWildcard, hasDangerAll, nodes)
	// Build DstPorts from the matching via prefixes.
	var rules []tailcfg.FilterRule
	for _, ipp := range grant.InternetProtocols {
		var destPorts []tailcfg.NetPortRange
		for _, prefix := range viaDstPrefixes {
			for _, port := range ipp.Ports {
				destPorts = append(destPorts, tailcfg.NetPortRange{
					IP:    prefix.String(),
					Ports: port,
				})
			}
		}
		if len(destPorts) > 0 {
			rules = append(rules, tailcfg.FilterRule{
				SrcIPs:   srcIPStrs,
				DstPorts: destPorts,
				IPProto:  ipp.Protocol.toIANAProtocolNumbers(),
			})
		}
	}
	return rules, nil
}
// compileGrantWithAutogroupSelf compiles a single Grant rule, handling
// autogroup:self per-node while supporting all other alias types normally.
// It returns a slice of filter rules because when an Grant has both autogroup:self
// and other destinations, they need to be split into separate rules with different
// source filtering logic.
//
//nolint:gocyclo,cyclop // complex ACL compilation logic
func (pol *Policy) compileGrantWithAutogroupSelf(
grant Grant,
users types.Users,
node types.NodeView,
nodes views.Slice[types.NodeView],
) ([]tailcfg.FilterRule, error) {
// Handle via route grants — filter rules only go to the node
// matching the via tag that actually advertises the destination subnets.
if len(grant.Via) > 0 {
return pol.compileViaGrant(grant, users, node, nodes)
}
var (
autogroupSelfDests []Alias
otherDests []Alias
)
for _, dest := range grant.Destinations {
if ag, ok := dest.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
autogroupSelfDests = append(autogroupSelfDests, dest)
} else {
otherDests = append(otherDests, dest)
}
}
var rules []tailcfg.FilterRule
var resolvedSrcs []ResolvedAddresses
// Track non-wildcard source IPs separately. When the grant has a
// wildcard (*) source plus explicit sources (tags, groups, etc.),
// Tailscale preserves the explicit IPs alongside the wildcard
// CGNAT ranges rather than merging them into the IPSet.
var nonWildcardSrcs []ResolvedAddresses
for i, src := range grant.Sources {
if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
return nil, errSelfInSources
}
ips, err := src.Resolve(pol, users, nodes)
if err != nil {
log.Trace().Caller().Err(err).Msgf("resolving source ips")
}
if ips != nil {
resolvedSrcs = append(resolvedSrcs, ips)
if _, isWildcard := grant.Sources[i].(Asterix); !isWildcard {
nonWildcardSrcs = append(nonWildcardSrcs, ips)
}
}
}
// When the grant has literally empty src=[] or dst=[], produce no rules
// at all — Tailscale returns null for these. This is distinct from sources
// that resolve to empty (e.g., group:empty) where Tailscale still produces
// CapGrant rules with empty SrcIPs.
if len(grant.Sources) == 0 || len(grant.Destinations) == 0 {
return rules, nil
}
if len(resolvedSrcs) == 0 && grant.App == nil {
return rules, nil
}
hasWildcard := sourcesHaveWildcard(grant.Sources)
hasDangerAll := sourcesHaveDangerAll(grant.Sources)
for _, ipp := range grant.InternetProtocols {
// Handle non-self destinations first to match Tailscale's
// rule ordering in the FilterRule wire format.
if len(otherDests) > 0 {
var srcIPs netipx.IPSetBuilder
for _, ips := range resolvedSrcs {
for _, pref := range ips.Prefixes() {
srcIPs.AddPrefix(pref)
}
}
srcResolved, err := newResolved(&srcIPs)
if err != nil {
return nil, err
}
if !srcResolved.Empty() {
destPorts := pol.destinationsToNetPortRange(users, nodes, otherDests, ipp.Ports)
if len(destPorts) > 0 {
srcIPStrs := srcIPsWithRoutes(srcResolved, hasWildcard, hasDangerAll, nodes)
// When sources include a wildcard (*) alongside
// explicit sources (tags, groups, etc.), Tailscale
// preserves the individual IPs from non-wildcard
// sources alongside the merged wildcard CGNAT
// ranges rather than absorbing them.
if hasWildcard && len(nonWildcardSrcs) > 0 {
seen := make(map[string]bool, len(srcIPStrs))
for _, s := range srcIPStrs {
seen[s] = true
}
for _, ips := range nonWildcardSrcs {
for _, s := range ips.Strings() {
if !seen[s] {
seen[s] = true
srcIPStrs = append(srcIPStrs, s)
}
}
}
}
rules = append(rules, tailcfg.FilterRule{
SrcIPs: srcIPStrs,
DstPorts: destPorts,
IPProto: ipp.Protocol.toIANAProtocolNumbers(),
})
}
}
}
// Handle autogroup:self destinations (if any)
// Tagged nodes don't participate in autogroup:self (identity is tag-based, not user-based)
if len(autogroupSelfDests) > 0 && !node.IsTagged() {
// Pre-filter to same-user untagged devices once - reuse for both sources and destinations
sameUserNodes := make([]types.NodeView, 0)
for _, n := range nodes.All() {
if !n.IsTagged() && n.User().ID() == node.User().ID() {
sameUserNodes = append(sameUserNodes, n)
}
}
if len(sameUserNodes) > 0 {
// Filter sources to only same-user untagged devices
var srcIPs netipx.IPSetBuilder
for _, ips := range resolvedSrcs {
for _, n := range sameUserNodes {
// Check if any of this node's IPs are in the source set
if slices.ContainsFunc(n.IPs(), ips.Contains) {
n.AppendToIPSet(&srcIPs)
}
}
}
srcResolved, err := newResolved(&srcIPs)
if err != nil {
return nil, err
}
if !srcResolved.Empty() {
var destPorts []tailcfg.NetPortRange
for _, n := range sameUserNodes {
for _, port := range ipp.Ports {
for _, ip := range n.IPs() {
destPorts = append(destPorts, tailcfg.NetPortRange{
IP: ip.String(),
Ports: port,
})
}
}
}
if len(destPorts) > 0 {
rules = append(rules, tailcfg.FilterRule{
SrcIPs: srcResolved.Strings(),
DstPorts: destPorts,
IPProto: ipp.Protocol.toIANAProtocolNumbers(),
})
}
}
}
}
}
// Handle app grants (CapGrant rules) — these are separate from
// InternetProtocols and produce FilterRules with CapGrant instead
// of DstPorts. A grant with both ip and app fields produces rules
// for each independently.
if grant.App != nil {
// Handle non-self destinations for CapGrant
if len(otherDests) > 0 {
var srcIPStrs []string
if len(resolvedSrcs) > 0 {
var srcIPs netipx.IPSetBuilder
for _, ips := range resolvedSrcs {
for _, pref := range ips.Prefixes() {
srcIPs.AddPrefix(pref)
}
}
srcResolved, err := newResolved(&srcIPs)
if err != nil {
return nil, err
}
if !srcResolved.Empty() {
srcIPStrs = srcIPsWithRoutes(srcResolved, hasWildcard, hasDangerAll, nodes)
if hasWildcard && len(nonWildcardSrcs) > 0 {
seen := make(map[string]bool, len(srcIPStrs))
for _, s := range srcIPStrs {
seen[s] = true
}
for _, ips := range nonWildcardSrcs {
for _, s := range ips.Strings() {
if !seen[s] {
seen[s] = true
srcIPStrs = append(srcIPStrs, s)
}
}
}
}
}
}
var (
capGrants []tailcfg.CapGrant
dstIPStrings []string
)
for _, dst := range otherDests {
ips, err := dst.Resolve(pol, users, nodes)
if err != nil {
continue
}
capGrants = append(capGrants, tailcfg.CapGrant{
Dsts: ips.Prefixes(),
CapMap: grant.App,
})
dstIPStrings = append(dstIPStrings, ips.Strings()...)
}
if len(capGrants) > 0 {
// When sources resolved to empty (e.g. empty group),
// Tailscale still produces the CapGrant rule with
// empty SrcIPs.
if srcIPStrs == nil {
srcIPStrs = []string{}
}
// Collect source prefixes for reversed companion rules.
var srcPrefixes []netip.Prefix
for _, ips := range resolvedSrcs {
srcPrefixes = append(srcPrefixes, ips.Prefixes()...)
}
rules = append(rules, tailcfg.FilterRule{
SrcIPs: srcIPStrs,
CapGrant: capGrants,
})
// Companion rules use reversed direction: companion
// SrcIPs are the destination IPs. When destinations
// include a wildcard, add subnet routes to companion
// SrcIPs to match main rule behavior.
dstsHaveWildcard := sourcesHaveWildcard(otherDests)
if dstsHaveWildcard {
dstIPStrings = append(dstIPStrings, approvedSubnetRoutes(nodes)...)
}
rules = append(
rules,
companionCapGrantRules(dstIPStrings, srcPrefixes, grant.App)...,
)
}
}
// Handle autogroup:self destinations for CapGrant
if len(autogroupSelfDests) > 0 && !node.IsTagged() {
sameUserNodes := make([]types.NodeView, 0)
for _, n := range nodes.All() {
if !n.IsTagged() && n.User().ID() == node.User().ID() {
sameUserNodes = append(sameUserNodes, n)
}
}
if len(sameUserNodes) > 0 {
var srcIPs netipx.IPSetBuilder
for _, ips := range resolvedSrcs {
for _, n := range sameUserNodes {
if slices.ContainsFunc(n.IPs(), ips.Contains) {
n.AppendToIPSet(&srcIPs)
}
}
}
srcResolved, err := newResolved(&srcIPs)
if err != nil {
return nil, err
}
if !srcResolved.Empty() {
var (
capGrants []tailcfg.CapGrant
dstIPStrings []string
)
for _, n := range sameUserNodes {
var dsts []netip.Prefix
for _, ip := range n.IPs() {
dsts = append(
dsts,
netip.PrefixFrom(ip, ip.BitLen()),
)
dstIPStrings = append(dstIPStrings, ip.String())
}
capGrants = append(capGrants, tailcfg.CapGrant{
Dsts: dsts,
CapMap: grant.App,
})
}
if len(capGrants) > 0 {
srcIPStrs := srcResolved.Strings()
rules = append(rules, tailcfg.FilterRule{
SrcIPs: srcIPStrs,
CapGrant: capGrants,
})
rules = append(
rules,
companionCapGrantRules(
dstIPStrings,
srcResolved.Prefixes(),
grant.App,
)...,
)
}
}
}
}
}
return rules, nil
return filterRulesForNode(grants, node, userIdx), nil
}
var sshAccept = tailcfg.SSHAction{
@ -809,6 +230,8 @@ var sshAccept = tailcfg.SSHAction{
// checkPeriodFromRule extracts the check period duration from an SSH rule.
// Returns SSHCheckPeriodDefault if no checkPeriod is configured,
// 0 if checkPeriod is "always", or the configured duration otherwise.
// This is used server-side by SSHCheckParams to resolve the real period
// when the client calls back; the wire format always sends 0.
func checkPeriodFromRule(rule SSH) time.Duration {
switch {
case rule.CheckPeriod == nil:
@ -820,13 +243,13 @@ func checkPeriodFromRule(rule SSH) time.Duration {
}
}
func sshCheck(baseURL string, duration time.Duration) tailcfg.SSHAction {
holdURL := baseURL + "/machine/ssh/action/from/$SRC_NODE_ID/to/$DST_NODE_ID?ssh_user=$SSH_USER&local_user=$LOCAL_USER"
func sshCheck(baseURL string, _ time.Duration) tailcfg.SSHAction {
holdURL := baseURL + "/machine/ssh/action/$SRC_NODE_ID/to/$DST_NODE_ID?local_user=$LOCAL_USER"
return tailcfg.SSHAction{
Reject: false,
Accept: false,
SessionDuration: duration,
SessionDuration: 0,
// Replaced in the client:
// * $SRC_NODE_IP (URL escaped)
// * $SRC_NODE_ID (Node.ID as int64 string)
@ -835,9 +258,9 @@ func sshCheck(baseURL string, duration time.Duration) tailcfg.SSHAction {
// * $SSH_USER (URL escaped, ssh user requested)
// * $LOCAL_USER (URL escaped, local user mapped)
HoldAndDelegate: holdURL,
AllowAgentForwarding: true,
AllowLocalPortForwarding: true,
AllowRemotePortForwarding: true,
AllowAgentForwarding: false,
AllowLocalPortForwarding: false,
AllowRemotePortForwarding: false,
}
}
@ -994,9 +417,20 @@ func (pol *Policy) compileSSHPolicy(
appendRules(taggedPrincipals, 0, false)
}
} else {
if principals := resolvedAddrsToPrincipals(srcIPs); len(principals) > 0 {
// Merge user and tagged principals into a
// single list. Tagged principals preserve
// per-tag duplication (a node with N tags
// appears N times, matching SaaS behavior).
var allPrincipals []*tailcfg.SSHPrincipal
for _, uid := range userIDs {
allPrincipals = append(allPrincipals, principalsByUser[uid]...)
}
allPrincipals = append(allPrincipals, taggedPrincipals...)
if len(allPrincipals) > 0 {
rules = append(rules, &tailcfg.SSHRule{
Principals: principals,
Principals: allPrincipals,
SSHUsers: baseUserMap,
Action: &action,
AcceptEnv: acceptEnv,
@ -1137,9 +571,7 @@ func groupSourcesByUser(
) ([]uint, map[uint][]*tailcfg.SSHPrincipal, []*tailcfg.SSHPrincipal) {
userIPSets := make(map[uint]*netipx.IPSetBuilder)
var taggedIPSet netipx.IPSetBuilder
hasTagged := false
var taggedPrincipals []*tailcfg.SSHPrincipal
for _, n := range nodes.All() {
if !slices.ContainsFunc(n.IPs(), srcIPs.Contains) {
@ -1147,9 +579,17 @@ func groupSourcesByUser(
}
if n.IsTagged() {
n.AppendToIPSet(&taggedIPSet)
hasTagged = true
// Tailscale SaaS resolves autogroup:tagged by
// iterating tag membership lists. A node with N
// tags produces N copies of its IPs in the
// principal list. Match that behavior so the SSH
// wire format is identical.
for range n.Tags().Len() {
for _, ip := range n.IPs() {
taggedPrincipals = append(taggedPrincipals,
&tailcfg.SSHPrincipal{NodeIP: ip.String()})
}
}
continue
}
@ -1185,16 +625,7 @@ func groupSourcesByUser(
slices.Sort(userIDs)
var tagged []*tailcfg.SSHPrincipal
if hasTagged {
taggedSet, err := taggedIPSet.IPSet()
if err == nil && taggedSet != nil {
tagged = ipSetToPrincipals(taggedSet)
}
}
return userIDs, principalsByUser, tagged
return userIDs, principalsByUser, taggedPrincipals
}
func ipSetToPrefixStringList(ips *netipx.IPSet) []string {

View File

@ -1071,7 +1071,7 @@ func TestCompileSSHPolicy_CheckAction(t *testing.T) {
assert.False(t, rule.Action.Reject)
assert.NotEmpty(t, rule.Action.HoldAndDelegate)
assert.Contains(t, rule.Action.HoldAndDelegate, "/machine/ssh/action/")
assert.Equal(t, 24*time.Hour, rule.Action.SessionDuration)
assert.Equal(t, time.Duration(0), rule.Action.SessionDuration)
// Verify check params are NOT encoded in the URL (looked up server-side).
assert.NotContains(t, rule.Action.HoldAndDelegate, "check_explicit")
@ -2632,25 +2632,24 @@ func TestCompileSSHPolicy_CheckPeriodVariants(t *testing.T) {
nodes := types.Nodes{&node}
// SaaS always sends SessionDuration=0 in the wire format
// regardless of checkPeriod. The check period is resolved
// server-side, not embedded in the SSHAction.
tests := []struct {
name string
checkPeriod *SSHCheckPeriod
wantDuration time.Duration
name string
checkPeriod *SSHCheckPeriod
}{
{
name: "nil period defaults to 12h",
checkPeriod: nil,
wantDuration: SSHCheckPeriodDefault,
name: "nil period",
checkPeriod: nil,
},
{
name: "always period uses 0",
checkPeriod: &SSHCheckPeriod{Always: true},
wantDuration: 0,
name: "always period",
checkPeriod: &SSHCheckPeriod{Always: true},
},
{
name: "explicit 2h",
checkPeriod: &SSHCheckPeriod{Duration: 2 * time.Hour},
wantDuration: 2 * time.Hour,
name: "explicit 2h",
checkPeriod: &SSHCheckPeriod{Duration: 2 * time.Hour},
},
}
@ -2682,7 +2681,7 @@ func TestCompileSSHPolicy_CheckPeriodVariants(t *testing.T) {
require.Len(t, sshPolicy.Rules, 1)
rule := sshPolicy.Rules[0]
assert.Equal(t, tt.wantDuration, rule.Action.SessionDuration)
assert.Equal(t, time.Duration(0), rule.Action.SessionDuration)
// Check params must NOT be in the URL; they are
// resolved server-side via SSHCheckParams.
assert.NotContains(t, rule.Action.HoldAndDelegate, "check_explicit")
@ -3832,7 +3831,7 @@ func TestCompileViaGrant(t *testing.T) {
nodeView := tt.node.View()
nodesSlice := tt.nodes.ViewSlice()
got, err := tt.pol.compileViaGrant(tt.grant, users, nodeView, nodesSlice)
cg, err := tt.pol.compileOneGrant(tt.grant, users, nodesSlice)
if tt.wantErr != nil {
require.ErrorIs(t, err, tt.wantErr)
@ -3842,6 +3841,11 @@ func TestCompileViaGrant(t *testing.T) {
require.NoError(t, err)
var got []tailcfg.FilterRule
if cg != nil {
got = compileViaForNode(cg, nodeView)
}
if tt.name == "wildcard sources include subnet routes in SrcIPs" {
// Wildcard resolves to CGNAT ranges; just check the route is appended.
require.Len(t, got, 1)
@ -3997,9 +4001,10 @@ func TestCompileGrantWithAutogroupSelf_GrantPaths(t *testing.T) {
nodeView := tt.node.View()
nodesSlice := allNodes.ViewSlice()
userIdx := buildUserNodeIndex(nodesSlice)
got, err := tt.pol.compileGrantWithAutogroupSelf(
tt.grant, users, nodeView, nodesSlice,
cg, err := tt.pol.compileOneGrant(
tt.grant, users, nodesSlice,
)
if tt.wantErr != nil {
@ -4010,6 +4015,13 @@ func TestCompileGrantWithAutogroupSelf_GrantPaths(t *testing.T) {
require.NoError(t, err)
var got []tailcfg.FilterRule
if cg != nil {
got = append(got, cg.rules...)
got = append(got, compileAutogroupSelf(cg, nodeView, userIdx)...)
got = mergeFilterRules(got)
}
switch tt.name {
case "autogroup:self destination for untagged node produces same-user devices":
// Should produce rules; sources and destinations should only

View File

@ -46,16 +46,22 @@ type PolicyManager struct {
// Lazy map of SSH policies
sshPolicyMap map[types.NodeID]*tailcfg.SSHPolicy
// Lazy map of per-node compiled filter rules (unreduced, for autogroup:self)
compiledFilterRulesMap map[types.NodeID][]tailcfg.FilterRule
// Lazy map of per-node filter rules (reduced, for packet filters)
filterRulesMap map[types.NodeID][]tailcfg.FilterRule
usesAutogroupSelf bool
// compiledGrants are the grants with sources pre-resolved.
// The single source of truth for filter compilation. Both
// global and per-node filter rules are derived from these.
compiledGrants []compiledGrant
userNodeIdx userNodeIndex
// needsPerNodeFilter is true when filter rules must be compiled
// per-node rather than globally. This is required when the policy
// uses autogroup:self (node-relative destinations) or via grants
// (per-router filter rules for steered traffic).
// Lazy map of per-node filter rules (reduced, for packet filters)
filterRulesMap map[types.NodeID][]tailcfg.FilterRule
// Lazy map of per-node matchers derived from UNREDUCED filter
// rules. Only populated on the slow path when needsPerNodeFilter
// is true; the fast path returns pm.matchers directly.
matchersForNodeMap map[types.NodeID][]matcher.Match
// needsPerNodeFilter is true when any compiled grant requires
// per-node work (autogroup:self or via grants).
needsPerNodeFilter bool
}
@ -77,14 +83,12 @@ func NewPolicyManager(b []byte, users []types.User, nodes views.Slice[types.Node
}
pm := PolicyManager{
pol: policy,
users: users,
nodes: nodes,
sshPolicyMap: make(map[types.NodeID]*tailcfg.SSHPolicy, nodes.Len()),
compiledFilterRulesMap: make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len()),
filterRulesMap: make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len()),
usesAutogroupSelf: policy.usesAutogroupSelf(),
needsPerNodeFilter: policy.usesAutogroupSelf() || policy.hasViaGrants(),
pol: policy,
users: users,
nodes: nodes,
sshPolicyMap: make(map[types.NodeID]*tailcfg.SSHPolicy, nodes.Len()),
filterRulesMap: make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len()),
matchersForNodeMap: make(map[types.NodeID][]matcher.Match, nodes.Len()),
}
_, err = pm.updateLocked()
@ -98,18 +102,17 @@ func NewPolicyManager(b []byte, users []types.User, nodes views.Slice[types.Node
// updateLocked updates the filter rules based on the current policy and nodes.
// It must be called with the lock held.
func (pm *PolicyManager) updateLocked() (bool, error) {
// Check if policy uses autogroup:self or via grants
pm.usesAutogroupSelf = pm.pol.usesAutogroupSelf()
pm.needsPerNodeFilter = pm.usesAutogroupSelf || pm.pol.hasViaGrants()
// Compile all grants once. Both global and per-node filter
// rules are derived from these compiled grants.
pm.compiledGrants = pm.pol.compileGrants(pm.users, pm.nodes)
pm.userNodeIdx = buildUserNodeIndex(pm.nodes)
pm.needsPerNodeFilter = hasPerNodeGrants(pm.compiledGrants)
var filter []tailcfg.FilterRule
var err error
// Standard compilation for all policies
filter, err = pm.pol.compileFilterRules(pm.users, pm.nodes)
if err != nil {
return false, fmt.Errorf("compiling filter rules: %w", err)
if pm.pol == nil || (pm.pol.ACLs == nil && len(pm.pol.Grants) == 0) {
filter = tailcfg.FilterAllowAll
} else {
filter = globalFilterRules(pm.compiledGrants)
}
// Hash both the compiled filter AND the policy content together.
@ -209,8 +212,8 @@ func (pm *PolicyManager) updateLocked() (bool, error) {
// policies for nodes that have changed. Particularly if the only difference is
// that nodes has been added or removed.
clear(pm.sshPolicyMap)
clear(pm.compiledFilterRulesMap)
clear(pm.filterRulesMap)
clear(pm.matchersForNodeMap)
}
// If nothing changed, no need to update nodes
@ -231,6 +234,11 @@ func (pm *PolicyManager) updateLocked() (bool, error) {
return true, nil
}
// SSHPolicy returns the tailcfg.SSHPolicy for node, compiling and
// caching on first access. Rules use SessionDuration = 0 (no
// auto-approval) and emit check URLs of the form
// /machine/ssh/action/{src}/to/{dst}?local_user={local_user} per the
// SaaS wire format. Cache is invalidated on policy reload.
func (pm *PolicyManager) SSHPolicy(baseURL string, node types.NodeView) (*tailcfg.SSHPolicy, error) {
pm.mu.Lock()
defer pm.mu.Unlock()
@ -251,9 +259,12 @@ func (pm *PolicyManager) SSHPolicy(baseURL string, node types.NodeView) (*tailcf
// SSHCheckParams resolves the SSH check period for a source-destination
// node pair by looking up the current policy. This avoids trusting URL
// parameters that a client could tamper with.
// It returns the check period duration and whether a matching check
// rule was found.
// parameters that a client could tamper with. First-match wins across
// the policy's SSH rules.
//
// Returns (duration, true) when a matching rule is found and
// (0, false) when none is. A (0, true) return means the matched rule
// uses a zero check period (re-check every session).
func (pm *PolicyManager) SSHCheckParams(
srcNodeID, dstNodeID types.NodeID,
) (time.Duration, bool) {
@ -414,16 +425,8 @@ func (pm *PolicyManager) BuildPeerMap(nodes views.Slice[types.NodeView]) map[typ
// but peer relationships require the full bidirectional access rules.
nodeMatchers := make(map[types.NodeID][]matcher.Match, nodes.Len())
for _, node := range nodes.All() {
filter, err := pm.compileFilterRulesForNodeLocked(node)
if err != nil {
continue
}
// Include all nodes in nodeMatchers, even those with empty filters.
// Empty filters result in empty matchers where CanAccess() returns false,
// but the node still needs to be in the map so hasFilterX is true.
// This ensures symmetric visibility works correctly: if node A can access
// node B, both should see each other regardless of B's filter rules.
nodeMatchers[node.ID()] = matcher.MatchesFromFilterRules(filter)
unreduced := pm.filterRulesForNodeLocked(node)
nodeMatchers[node.ID()] = matcher.MatchesFromFilterRules(unreduced)
}
// Check each node pair for peer relationships.
@ -464,81 +467,59 @@ func (pm *PolicyManager) BuildPeerMap(nodes views.Slice[types.NodeView]) map[typ
return ret
}
// compileFilterRulesForNodeLocked returns the unreduced compiled filter rules for a node
// when using autogroup:self. This is used by BuildPeerMap to determine peer relationships.
// For packet filters sent to nodes, use filterForNodeLocked which returns reduced rules.
func (pm *PolicyManager) compileFilterRulesForNodeLocked(node types.NodeView) ([]tailcfg.FilterRule, error) {
if pm == nil {
return nil, nil
}
// Check if we have cached compiled rules
if rules, ok := pm.compiledFilterRulesMap[node.ID()]; ok {
return rules, nil
}
// Compile per-node rules with autogroup:self expanded
rules, err := pm.pol.compileFilterRulesForNode(pm.users, node, pm.nodes)
if err != nil {
return nil, fmt.Errorf("compiling filter rules for node: %w", err)
}
// Cache the unreduced compiled rules
pm.compiledFilterRulesMap[node.ID()] = rules
return rules, nil
// filterRulesForNodeLocked returns the unreduced compiled filter rules
// for a node, combining pre-compiled global rules with per-node self
// and via rules from the stored compiled grants.
func (pm *PolicyManager) filterRulesForNodeLocked(
node types.NodeView,
) []tailcfg.FilterRule {
return filterRulesForNode(
pm.compiledGrants, node, pm.userNodeIdx,
)
}
// filterForNodeLocked returns the filter rules for a specific node, already reduced
// to only include rules relevant to that node.
// This is a lock-free version of FilterForNode for internal use when the lock is already held.
// BuildPeerMap already holds the lock, so we need a version that doesn't re-acquire it.
func (pm *PolicyManager) filterForNodeLocked(node types.NodeView) ([]tailcfg.FilterRule, error) {
// filterForNodeLocked returns the filter rules for a specific node,
// already reduced to only include rules relevant to that node.
//
// Fast path (!needsPerNodeFilter): reduces global filter per-node.
// Slow path (needsPerNodeFilter): combines global + self + via rules
// from the stored compiled grants, then reduces.
//
// Both paths derive from the same compiledGrants, ensuring there is
// no divergence between global and per-node filter output.
//
// Lock-free version for internal use when the lock is already held.
func (pm *PolicyManager) filterForNodeLocked(
node types.NodeView,
) []tailcfg.FilterRule {
if pm == nil {
return nil, nil
return nil
}
if !pm.needsPerNodeFilter {
// For global filters, reduce to only rules relevant to this node.
// Cache the reduced filter per node for efficiency.
if rules, ok := pm.filterRulesMap[node.ID()]; ok {
return rules, nil
}
// Use policyutil.ReduceFilterRules for global filter reduction.
reducedFilter := policyutil.ReduceFilterRules(node, pm.filter)
pm.filterRulesMap[node.ID()] = reducedFilter
return reducedFilter, nil
}
// Per-node compilation is needed when the policy uses autogroup:self
// (node-relative destinations) or via grants (per-router filter rules).
// Check if we have cached reduced rules for this node.
if rules, ok := pm.filterRulesMap[node.ID()]; ok {
return rules, nil
return rules
}
// Get unreduced compiled rules
compiledRules, err := pm.compileFilterRulesForNodeLocked(node)
if err != nil {
return nil, err
var unreduced []tailcfg.FilterRule
if !pm.needsPerNodeFilter {
unreduced = pm.filter
} else {
unreduced = pm.filterRulesForNodeLocked(node)
}
// Reduce the compiled rules to only destinations relevant to this node
reducedFilter := policyutil.ReduceFilterRules(node, compiledRules)
reduced := policyutil.ReduceFilterRules(node, unreduced)
pm.filterRulesMap[node.ID()] = reduced
// Cache the reduced filter
pm.filterRulesMap[node.ID()] = reducedFilter
return reducedFilter, nil
return reduced
}
// FilterForNode returns the filter rules for a specific node, already reduced
// to only include rules relevant to that node.
// If the policy uses autogroup:self, this returns node-specific compiled rules.
// Otherwise, it returns the global filter reduced for this node.
//
// Cache is invalidated by updateLocked on policy reload, node-set
// change, or tag-state change.
func (pm *PolicyManager) FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error) {
if pm == nil {
return nil, nil
@ -547,7 +528,7 @@ func (pm *PolicyManager) FilterForNode(node types.NodeView) ([]tailcfg.FilterRul
pm.mu.Lock()
defer pm.mu.Unlock()
return pm.filterForNodeLocked(node)
return pm.filterForNodeLocked(node), nil
}
// MatchersForNode returns the matchers for peer relationship determination for a specific node.
@ -556,6 +537,10 @@ func (pm *PolicyManager) FilterForNode(node types.NodeView) ([]tailcfg.FilterRul
//
// For global policies: returns the global matchers (same for all nodes)
// For autogroup:self: returns node-specific matchers from unreduced compiled rules.
//
// Per-node results are cached and invalidated on policy/node updates
// so BuildPeerMap's O(N²) slow path avoids recomputing matchers for
// every pair.
func (pm *PolicyManager) MatchersForNode(node types.NodeView) ([]matcher.Match, error) {
if pm == nil {
return nil, nil
@ -571,14 +556,17 @@ func (pm *PolicyManager) MatchersForNode(node types.NodeView) ([]matcher.Match,
return pm.matchers, nil
}
// For autogroup:self or via grants, get unreduced compiled rules and create matchers
compiledRules, err := pm.compileFilterRulesForNodeLocked(node)
if err != nil {
return nil, err
if cached, ok := pm.matchersForNodeMap[node.ID()]; ok {
return cached, nil
}
// Create matchers from unreduced rules for peer relationship determination
return matcher.MatchesFromFilterRules(compiledRules), nil
// For autogroup:self or via grants, derive matchers from
// the stored compiled grants for this specific node.
unreduced := pm.filterRulesForNodeLocked(node)
matchers := matcher.MatchesFromFilterRules(unreduced)
pm.matchersForNodeMap[node.ID()] = matchers
return matchers, nil
}
// SetUsers updates the users in the policy manager and updates the filter rules.
@ -649,8 +637,8 @@ func (pm *PolicyManager) SetNodes(nodes views.Slice[types.NodeView]) (bool, erro
if !needsUpdate {
// This ensures fresh filter rules are generated for all nodes
clear(pm.sshPolicyMap)
clear(pm.compiledFilterRulesMap)
clear(pm.filterRulesMap)
clear(pm.matchersForNodeMap)
}
// Always return true when nodes changed, even if filter hash didn't change
// (can happen with autogroup:self or when nodes are added but don't affect rules)
@ -799,6 +787,9 @@ func (pm *PolicyManager) NodeCanApproveRoute(node types.NodeView, route netip.Pr
return false
}
pm.mu.Lock()
defer pm.mu.Unlock()
// If the route to-be-approved is an exit route, then we need to check
// if the node is in allowed to approve it. This is treated differently
// than the auto-approvers, as the auto-approvers are not allowed to
@ -810,16 +801,9 @@ func (pm *PolicyManager) NodeCanApproveRoute(node types.NodeView, route netip.Pr
return false
}
if slices.ContainsFunc(node.IPs(), pm.exitSet.Contains) {
return true
}
return false
return slices.ContainsFunc(node.IPs(), pm.exitSet.Contains)
}
pm.mu.Lock()
defer pm.mu.Unlock()
// The fast path is that a node requests to approve a prefix
// where there is an exact entry, e.g. 10.0.0.0/8, then
// check and return quickly
@ -852,6 +836,10 @@ func (pm *PolicyManager) NodeCanApproveRoute(node types.NodeView, route netip.Pr
// For each via grant where the viewer matches the source, it checks whether the
// peer advertises any of the grant's destination prefixes. If the peer has the
// via tag, those prefixes go into Include; otherwise into Exclude.
//
// Performance note: this holds pm.mu for its full duration. Hot
// callers should memoise by (policy-hash, viewer-id) rather than
// invoking this per-pair.
func (pm *PolicyManager) ViaRoutesForPeer(viewer, peer types.NodeView) types.ViaRouteResult {
var result types.ViaRouteResult
@ -872,28 +860,33 @@ func (pm *PolicyManager) ViaRoutesForPeer(viewer, peer types.NodeView) types.Via
grants = append(grants, aclToGrants(acl)...)
}
for _, grant := range grants {
if len(grant.Via) == 0 {
continue
}
// Check if viewer matches any grant source.
viewerMatches := false
// Resolve each grant's sources against the viewer once. The three
// passes below reuse this result instead of calling src.Resolve
// per grant per pass.
viewerIPs := viewer.IPs()
viewerMatchesGrant := make([]bool, len(grants))
for i, grant := range grants {
for _, src := range grant.Sources {
ips, err := src.Resolve(pm.pol, pm.users, pm.nodes)
if err != nil {
continue
}
if ips != nil && slices.ContainsFunc(viewer.IPs(), ips.Contains) {
viewerMatches = true
if ips != nil && slices.ContainsFunc(viewerIPs, ips.Contains) {
viewerMatchesGrant[i] = true
break
}
}
}
if !viewerMatches {
for i, grant := range grants {
if len(grant.Via) == 0 {
continue
}
if !viewerMatchesGrant[i] {
continue
}
@ -913,8 +906,7 @@ func (pm *PolicyManager) ViaRoutesForPeer(viewer, peer types.NodeView) types.Via
// autogroup:internet via grants do NOT affect AllowedIPs or
// route steering for exit nodes. Tailscale SaaS handles exit
// traffic forwarding through the client's exit node selection
// mechanism, not through AllowedIPs. Verified by golden
// captures GRANT-V14 through GRANT-V36.
// mechanism, not through AllowedIPs.
}
}
@ -940,6 +932,113 @@ func (pm *PolicyManager) ViaRoutesForPeer(viewer, peer types.NodeView) types.Via
}
}
// Detect prefixes that should fall back to HA primary election
// rather than per-viewer via steering. Two conditions trigger this:
//
// 1. Multi-router via: a via grant's tag matches multiple peers
// advertising the same prefix.
// 2. Regular grant overlap: a non-via grant also covers the same
// prefix for this viewer.
//
// When neither condition is met, per-viewer via steering applies.
if len(result.Include) > 0 || len(result.Exclude) > 0 {
// Multi-router via election: when a via grant's tag matches
// multiple peers advertising the same prefix, only the
// lowest-ID peer (the via-group primary) keeps the prefix in
// Include. The others move to Exclude. This mirrors HA
// primary election scoped to the via tag group.
//
// Unlike the global PrimaryRoutes election (routes/primary.go),
// which picks one primary across ALL advertisers of a prefix,
// this election is scoped to the via tag. Two via grants with
// different tags (e.g., tag:ha-a vs tag:ha-b) each elect their
// own winner independently.
//
// Only process via grants where the viewer matches the source,
// otherwise grants for other viewer groups would incorrectly
// demote the peer.
for i, grant := range grants {
if len(grant.Via) == 0 {
continue
}
if !viewerMatchesGrant[i] {
continue
}
for _, dst := range grant.Destinations {
d, ok := dst.(*Prefix)
if !ok {
continue
}
dstPrefix := netip.Prefix(*d)
if !slices.Contains(result.Include, dstPrefix) {
continue
}
// Find the lowest-ID peer with this via tag that
// advertises this prefix — the via-group primary.
var viaPrimaryID types.NodeID
for _, viaTag := range grant.Via {
for _, node := range pm.nodes.All() {
if node.HasTag(string(viaTag)) &&
slices.Contains(node.SubnetRoutes(), dstPrefix) {
if viaPrimaryID == 0 || node.ID() < viaPrimaryID {
viaPrimaryID = node.ID()
}
}
}
}
// If the current peer is not the via-group primary,
// demote the prefix from Include to Exclude.
if viaPrimaryID != 0 && peer.ID() != viaPrimaryID {
result.Include = slices.DeleteFunc(result.Include, func(p netip.Prefix) bool {
return p == dstPrefix
})
if !slices.Contains(result.Exclude, dstPrefix) {
result.Exclude = append(result.Exclude, dstPrefix)
}
}
}
}
// Check for regular (non-via) grants covering the same prefix.
// When a regular grant also covers a prefix that a via grant
// included, defer to global HA primary election (UsePrimary).
// When a regular grant covers a prefix that a via grant excluded
// (peer lacks via tag), remove the exclusion so RoutesForPeer
// can apply normal ReduceRoutes + primary logic.
for i, grant := range grants {
if len(grant.Via) > 0 {
continue
}
if !viewerMatchesGrant[i] {
continue
}
for _, dst := range grant.Destinations {
if d, ok := dst.(*Prefix); ok {
dstPrefix := netip.Prefix(*d)
if slices.Contains(result.Include, dstPrefix) &&
!slices.Contains(result.UsePrimary, dstPrefix) {
result.UsePrimary = append(result.UsePrimary, dstPrefix)
}
// A regular grant overrides a via exclusion: the
// peer doesn't need the via tag if the viewer has
// direct (non-via) access to the prefix.
result.Exclude = slices.DeleteFunc(result.Exclude, func(p netip.Prefix) bool {
return p == dstPrefix
})
}
}
}
}
return result
}
@ -1151,13 +1250,13 @@ func (pm *PolicyManager) invalidateAutogroupSelfCache(oldNodes, newNodes views.S
// If we found the user and they're affected, clear this cache entry
if found {
if _, affected := affectedUsers[nodeUserID]; affected {
delete(pm.compiledFilterRulesMap, nodeID)
delete(pm.filterRulesMap, nodeID)
delete(pm.matchersForNodeMap, nodeID)
}
} else {
// Node not found in either old or new list, clear it
delete(pm.compiledFilterRulesMap, nodeID)
delete(pm.filterRulesMap, nodeID)
delete(pm.matchersForNodeMap, nodeID)
}
}
@ -1171,13 +1270,14 @@ func (pm *PolicyManager) invalidateAutogroupSelfCache(oldNodes, newNodes views.S
// invalidateNodeCache invalidates cache entries based on what changed.
func (pm *PolicyManager) invalidateNodeCache(newNodes views.Slice[types.NodeView]) {
if pm.usesAutogroupSelf {
// For autogroup:self, a node's filter depends on its peers (same user).
// When any node in a user changes, all nodes for that user need invalidation.
if pm.needsPerNodeFilter {
// For autogroup:self or via grants, a node's filter depends
// on its peers. When any node changes, invalidate affected
// users' caches.
pm.invalidateAutogroupSelfCache(pm.nodes, newNodes)
} else {
// For global policies, a node's filter depends only on its own properties.
// Only invalidate nodes whose properties actually changed.
// For global policies, a node's filter depends only on its
// own properties. Only invalidate changed nodes.
pm.invalidateGlobalPolicyCache(newNodes)
}
}
@ -1205,6 +1305,7 @@ func (pm *PolicyManager) invalidateGlobalPolicyCache(newNodes views.Slice[types.
if newNode.HasNetworkChanges(oldNode) {
delete(pm.filterRulesMap, nodeID)
delete(pm.matchersForNodeMap, nodeID)
}
}
@ -1214,6 +1315,12 @@ func (pm *PolicyManager) invalidateGlobalPolicyCache(newNodes views.Slice[types.
delete(pm.filterRulesMap, nodeID)
}
}
for nodeID := range pm.matchersForNodeMap {
if _, exists := newNodeMap[nodeID]; !exists {
delete(pm.matchersForNodeMap, nodeID)
}
}
}
// flattenTags flattens the TagOwners by resolving nested tags and detecting cycles.

View File

@ -355,9 +355,8 @@ func TestInvalidateGlobalPolicyCache(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
pm := &PolicyManager{
nodes: tt.oldNodes.ViewSlice(),
filterRulesMap: tt.initialCache,
usesAutogroupSelf: false,
nodes: tt.oldNodes.ViewSlice(),
filterRulesMap: tt.initialCache,
}
pm.invalidateGlobalPolicyCache(tt.newNodes.ViewSlice())
@ -399,7 +398,7 @@ func TestAutogroupSelfReducedVsUnreducedRules(t *testing.T) {
pm, err := NewPolicyManager([]byte(policyStr), users, nodes.ViewSlice())
require.NoError(t, err)
require.True(t, pm.usesAutogroupSelf, "policy should use autogroup:self")
require.True(t, pm.needsPerNodeFilter, "policy should need per-node filter")
// Test FilterForNode returns reduced rules
// For node1: should have rules where node1 is in destinations (its own IP)
@ -572,7 +571,7 @@ func TestAutogroupSelfPolicyUpdateTriggersMapResponse(t *testing.T) {
pm, err := NewPolicyManager([]byte(initialPolicy), users, nodes.ViewSlice())
require.NoError(t, err)
require.True(t, pm.usesAutogroupSelf, "policy should use autogroup:self")
require.True(t, pm.needsPerNodeFilter, "policy should need per-node filter")
// Get initial filter rules for test-1 (should be cached)
rules1, err := pm.FilterForNode(test1Node.View())

View File

@ -67,8 +67,8 @@ var (
// Grant validation errors.
var (
ErrGrantMissingIPOrApp = errors.New("grants must specify either 'ip' or 'app' field")
ErrGrantInvalidViaTag = errors.New("grant 'via' tag is not defined in policy")
ErrGrantMissingIPOrApp = errors.New("ip and app can not both be empty")
ErrGrantViaNotATag = errors.New("via can only be a tag")
ErrProtocolPortInvalidFormat = errors.New("expected only one colon in Internet protocol and port type")
ErrCapNameInvalidForm = errors.New("capability name must have the form {domain}/{path}")
ErrCapNameTailscaleDomain = errors.New("capability name must not be in the tailscale.com domain")
@ -2550,7 +2550,7 @@ func (p *Policy) validate() error {
err := p.TagOwners.Contains(tagOwner)
if err != nil {
errs = append(errs, err)
errs = append(errs, fmt.Errorf("src=%w", err))
}
}
}
@ -2587,11 +2587,14 @@ func (p *Policy) validate() error {
}
}
// Validate via tags
// Validate via tags. Wording matches Tailscale SaaS
// ("tag %q not found"), which differs from the ACL-src
// wording ("src=tag not found: %q").
for _, viaTag := range grant.Via {
err := p.TagOwners.Contains(&viaTag)
if err != nil {
errs = append(errs, fmt.Errorf("%w in grant via: %q", ErrGrantInvalidViaTag, viaTag))
//nolint:err113 // SaaS-aligned dynamic phrasing; no caller does errors.Is.
errs = append(errs, fmt.Errorf("tag %q not found", viaTag))
}
}
@ -3001,11 +3004,19 @@ func unmarshalPolicy(b []byte) (*Policy, error) {
ast.Standardize()
if err = json.Unmarshal(ast.Pack(), &policy, policyJSONOpts...); err != nil { //nolint:noinlineerr
if serr, ok := errors.AsType[*json.SemanticError](err); ok && errors.Is(serr.Err, json.ErrUnknownName) {
ptr := serr.JSONPointer
name := ptr.LastToken()
if serr, ok := errors.AsType[*json.SemanticError](err); ok {
if errors.Is(serr.Err, json.ErrUnknownName) {
ptr := serr.JSONPointer
name := ptr.LastToken()
return nil, fmt.Errorf("%w: %q", ErrUnknownField, name)
return nil, fmt.Errorf("%w: %q", ErrUnknownField, name)
}
// Non-tag entries in grant.via surface as type errors on
// []Tag; match SaaS wording instead of Go's JSON diagnostic.
if strings.Contains(string(serr.JSONPointer), "/via/") {
return nil, ErrGrantViaNotATag
}
}
return nil, fmt.Errorf("parsing policy from bytes: %w", err)
@ -3040,58 +3051,3 @@ func validateProtocolPortCompatibility(protocol Protocol, destinations []AliasWi
return nil
}
// usesAutogroupSelf reports whether autogroup:self appears anywhere in
// the policy, either as a source or a destination, in ACL or SSH rules.
// A nil policy never uses autogroup:self.
func (p *Policy) usesAutogroupSelf() bool {
	if p == nil {
		return false
	}

	// isSelf reports whether an alias value is the autogroup:self
	// autogroup. Centralises the type assertion repeated across the
	// four membership checks below.
	isSelf := func(v any) bool {
		ag, ok := v.(*AutoGroup)
		return ok && ag.Is(AutoGroupSelf)
	}

	// ACL rules: sources and destination aliases.
	for _, acl := range p.ACLs {
		for _, src := range acl.Sources {
			if isSelf(src) {
				return true
			}
		}
		for _, dst := range acl.Destinations {
			if isSelf(dst.Alias) {
				return true
			}
		}
	}

	// SSH rules: sources and destinations.
	for _, ssh := range p.SSHs {
		for _, src := range ssh.Sources {
			if isSelf(src) {
				return true
			}
		}
		for _, dst := range ssh.Destinations {
			if isSelf(dst) {
				return true
			}
		}
	}

	return false
}
// hasViaGrants reports whether any grant in the policy carries a
// non-empty Via field. Such grants require per-node filter
// compilation, since via rules are routed to specific nodes.
// A nil policy has no via grants.
func (p *Policy) hasViaGrants() bool {
	if p == nil {
		return false
	}
	for i := range p.Grants {
		if len(p.Grants[i].Via) > 0 {
			return true
		}
	}
	return false
}

View File

@ -4629,7 +4629,7 @@ func TestUnmarshalGrants(t *testing.T) {
]
}
`,
wantErr: "grants must specify either 'ip' or 'app' field",
wantErr: "ip and app can not both be empty",
},
{
name: "valid-grant-empty-sources",
@ -4698,7 +4698,7 @@ func TestUnmarshalGrants(t *testing.T) {
]
}
`,
wantErr: "grant 'via' tag is not defined in policy",
wantErr: `tag "tag:undefined-router" not found`,
},
{
name: "invalid-grant-undefined-source-group",