mirror of
https://github.com/tailscale/tailscale.git
synced 2026-05-05 12:16:44 +02:00
wip
This commit is contained in:
parent
f174ecb6fd
commit
1d4916043d
126
appc/conn25.go
126
appc/conn25.go
@ -7,14 +7,124 @@ import (
|
||||
"net/netip"
|
||||
"sync"
|
||||
|
||||
"go4.org/netipx"
|
||||
"golang.org/x/net/dns/dnsmessage"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
var tsMBox = dnsmessage.MustNewName("support.tailscale.com.")
|
||||
|
||||
// appAddr pairs a destination address with the application it belongs to,
// used as the value side of the per-peer magic-IP maps.
type appAddr struct {
	addr netip.Addr // real destination address the magic IP stands in for
	app  string     // application label (currently derived from the DNS name in assignMagic)
}
|
||||
|
||||
// Conn25 holds the developing state for the as yet nascent next generation app connector.
|
||||
// There is currently (2025-12-08) no actual app connecting functionality.
|
||||
type Conn25 struct {
|
||||
mu sync.Mutex
|
||||
magicIPPool ippool // should not be mutated
|
||||
transitIPPool ippool // should not be mutated
|
||||
|
||||
mu sync.Mutex
|
||||
// map of peer -> (map of transitip -> dst ip)
|
||||
transitIPs map[tailcfg.NodeID]map[netip.Addr]netip.Addr
|
||||
// map of peer -> (map of magicip -> appAddr of dst ip)
|
||||
magicIPs map[tailcfg.NodeID]map[netip.Addr]appAddr
|
||||
}
|
||||
|
||||
func NewConn25(magicPool, transitPool *netipx.IPSet) *Conn25 {
|
||||
return &Conn25{
|
||||
magicIPPool: *newIPPool(magicPool),
|
||||
transitIPPool: *newIPPool(transitPool),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Conn25) assignMagic(domain string, addr netip.Addr) (netip.Addr, error) {
|
||||
mip, err := c.magicIPPool.next()
|
||||
if err != nil {
|
||||
// TODO(fran) the pool is exhausted, what to do?
|
||||
return netip.Addr{}, err
|
||||
}
|
||||
// TODO(fran) plumb this through from somewhere
|
||||
nid := tailcfg.NodeID(1)
|
||||
// TODO(fran)
|
||||
app := "dunno? " + domain
|
||||
c.setMagicIP(nid, mip, addr, app)
|
||||
return mip, nil
|
||||
}
|
||||
|
||||
func (c *Conn25) MapDNSResponse(buf []byte) []byte {
|
||||
// TODO(fran) should we be passing everything through (pretending we're not here)
|
||||
// or eg putting our info in SOARecords?
|
||||
// TODO(fran) does something a bit more general than this belong in the dns package somewhere?
|
||||
// how similar is it to what we do in natc (not _super_ similar), or eg sniproxy, messagecache, peerapi
|
||||
var msg dnsmessage.Message
|
||||
err := msg.Unpack(buf)
|
||||
if err != nil {
|
||||
return buf
|
||||
}
|
||||
|
||||
var resolves map[string][]netip.Addr
|
||||
var addrQCount int
|
||||
for _, q := range msg.Questions {
|
||||
if q.Type != dnsmessage.TypeA && q.Type != dnsmessage.TypeAAAA {
|
||||
continue
|
||||
}
|
||||
addrQCount++
|
||||
}
|
||||
|
||||
rcode := dnsmessage.RCodeSuccess
|
||||
if addrQCount > 0 && len(resolves) == 0 {
|
||||
rcode = dnsmessage.RCodeNameError
|
||||
}
|
||||
|
||||
b := dnsmessage.NewBuilder(nil,
|
||||
dnsmessage.Header{
|
||||
ID: msg.Header.ID,
|
||||
Response: true,
|
||||
Authoritative: true,
|
||||
RCode: rcode,
|
||||
})
|
||||
b.EnableCompression()
|
||||
|
||||
if err := b.StartQuestions(); err != nil {
|
||||
return buf
|
||||
}
|
||||
|
||||
for _, q := range msg.Questions {
|
||||
b.Question(q)
|
||||
}
|
||||
|
||||
if err := b.StartAnswers(); err != nil {
|
||||
return buf
|
||||
}
|
||||
|
||||
for _, a := range msg.Answers {
|
||||
switch a.Header.Type {
|
||||
case dnsmessage.TypeA:
|
||||
msgARecord := (a.Body).(*dnsmessage.AResource)
|
||||
ourAddr, err := c.assignMagic(a.Header.Name.String(), netip.AddrFrom4(msgARecord.A))
|
||||
if err != nil {
|
||||
return buf
|
||||
}
|
||||
if err := b.AResource(
|
||||
a.Header,
|
||||
dnsmessage.AResource{A: ourAddr.As4()},
|
||||
); err != nil {
|
||||
return buf
|
||||
}
|
||||
default:
|
||||
// TODO how to just write whatever we already have? is this it?
|
||||
body := a.Body.(*dnsmessage.UnknownResource)
|
||||
b.UnknownResource(a.Header, *body)
|
||||
}
|
||||
}
|
||||
|
||||
outbs, err := b.Finish()
|
||||
if err != nil {
|
||||
return buf
|
||||
}
|
||||
return outbs
|
||||
}
|
||||
|
||||
// dupeTransitIPMessage is the message used when a ConnectorTransitIPRequest
// lists the same transit address more than once (presumably surfaced by
// handleTransitIPRequest, whose body is elsewhere — confirm).
const dupeTransitIPMessage = "Duplicate transit address in ConnectorTransitIPRequest"
|
||||
@ -55,6 +165,20 @@ func (c *Conn25) handleTransitIPRequest(nid tailcfg.NodeID, tipr TransitIPReques
|
||||
return TransitIPResponse{}
|
||||
}
|
||||
|
||||
func (c *Conn25) setMagicIP(nid tailcfg.NodeID, magicAddr, dstAddr netip.Addr, app string) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if c.magicIPs == nil {
|
||||
c.magicIPs = make(map[tailcfg.NodeID]map[netip.Addr]appAddr)
|
||||
}
|
||||
peerMap, ok := c.magicIPs[nid]
|
||||
if !ok {
|
||||
peerMap = make(map[netip.Addr]appAddr)
|
||||
c.magicIPs[nid] = peerMap
|
||||
}
|
||||
peerMap[magicAddr] = appAddr{addr: dstAddr, app: app}
|
||||
}
|
||||
|
||||
func (c *Conn25) transitIPTarget(nid tailcfg.NodeID, tip netip.Addr) netip.Addr {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
@ -4,6 +4,7 @@
|
||||
package appc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"testing"
|
||||
|
||||
@ -186,3 +187,10 @@ func TestTransitIPTargetUnknownTIP(t *testing.T) {
|
||||
t.Fatalf("Unknown transit addr, want: %v, got %v", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFran(t *testing.T) {
|
||||
respbs := []byte{170, 165, 129, 128, 0, 1, 0, 1, 0, 0, 0, 0, 6, 103, 111, 111, 103, 108, 101, 3, 99, 111, 109, 0, 0, 1, 0, 1, 192, 12, 0, 1, 0, 1, 0, 0, 0, 15, 0, 4, 142, 250, 188, 238}
|
||||
c := &Conn25{}
|
||||
mappedBytes := c.MapDNSResponse(respbs)
|
||||
fmt.Println(mappedBytes)
|
||||
}
|
||||
|
||||
@ -23,6 +23,7 @@ func init() {
|
||||
feature.Register(featureName)
|
||||
newExtension := func(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) {
|
||||
e := &extension{
|
||||
// TODO(fran) 2025-12-18 we need to unify this with the conn25 in [ipnlocal.LocalBackend]
|
||||
conn: &appc.Conn25{},
|
||||
}
|
||||
return e, nil
|
||||
|
||||
@ -88,6 +88,7 @@ import (
|
||||
"tailscale.com/util/execqueue"
|
||||
"tailscale.com/util/goroutines"
|
||||
"tailscale.com/util/mak"
|
||||
"tailscale.com/util/must"
|
||||
"tailscale.com/util/osuser"
|
||||
"tailscale.com/util/rands"
|
||||
"tailscale.com/util/set"
|
||||
@ -271,6 +272,7 @@ type LocalBackend struct {
|
||||
ccGen clientGen // function for producing controlclient; lazily populated
|
||||
sshServer SSHServer // or nil, initialized lazily.
|
||||
appConnector *appc.AppConnector // or nil, initialized when configured.
|
||||
conn25 *appc.Conn25 // or nil, initialized when configured.
|
||||
// notifyCancel cancels notifications to the current SetNotifyCallback.
|
||||
notifyCancel context.CancelFunc
|
||||
cc controlclient.Client // TODO(nickkhyl): move to nodeBackend
|
||||
@ -4923,6 +4925,27 @@ func (b *LocalBackend) blockEngineUpdatesLocked(block bool) {
|
||||
b.blocked = block
|
||||
}
|
||||
|
||||
func (b *LocalBackend) reconfigConn25(nm *netmap.NetworkMap, prefs ipn.PrefsView) {
|
||||
// TODO(fran) figure out if there's conn25ing happening, presumably if there's connectors in capmap and not like --accept-routes=false???? something?
|
||||
// nb in contrast to appc, conn25 needs to keep state on the client too.
|
||||
// TODO(fran) what happens when the profile changes? that's why we get called from authReconfig right?
|
||||
// TODO(fran) this conn25 needs to be the same one in the extension in /feature/conn25
|
||||
if b.conn25 == nil {
|
||||
// TODO debug code
|
||||
mpoolbuilder := &netipx.IPSetBuilder{}
|
||||
mpoolbuilder.AddPrefix(netip.MustParsePrefix("1.0.0.0/16"))
|
||||
tpoolbuilder := &netipx.IPSetBuilder{}
|
||||
tpoolbuilder.AddPrefix(netip.MustParsePrefix("2.0.0.0/16"))
|
||||
b.conn25 = appc.NewConn25(must.Get(mpoolbuilder.IPSet()), must.Get(tpoolbuilder.IPSet()))
|
||||
dnsManager, ok := b.sys.DNSManager.GetOK()
|
||||
if ok { // TODO
|
||||
dnsManager.QueryResponseMapper = func(inbs []byte) []byte {
|
||||
return b.conn25.MapDNSResponse(inbs)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// reconfigAppConnectorLocked updates the app connector state based on the
|
||||
// current network map and preferences.
|
||||
// b.mu must be held.
|
||||
@ -5065,6 +5088,7 @@ func (b *LocalBackend) authReconfigLocked() {
|
||||
dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, version.OS())
|
||||
// If the current node is an app connector, ensure the app connector machine is started
|
||||
b.reconfigAppConnectorLocked(nm, prefs)
|
||||
b.reconfigConn25(nm, prefs)
|
||||
|
||||
if !prefs.WantRunning() {
|
||||
b.logf("[v1] authReconfig: skipping because !WantRunning.")
|
||||
|
||||
@ -841,6 +841,12 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg.
|
||||
|
||||
// Add split DNS routes, with no regard to exit node configuration.
|
||||
addSplitDNSRoutes(nm.DNS.Routes)
|
||||
// TODO(fran) here's where we look for the capmap for conn25
|
||||
if nm.SelfName() == "d783302cc665.taile25f.ts.net." {
|
||||
addSplitDNSRoutes(map[string][]*dnstype.Resolver{
|
||||
"google.com": {&dnstype.Resolver{Addr: "http://100.105.210.108:41811/dns-query"}},
|
||||
})
|
||||
}
|
||||
|
||||
// Set FallbackResolvers as the default resolvers in the
|
||||
// scenarios that can't handle a purely split-DNS config. See
|
||||
|
||||
@ -67,6 +67,8 @@ type Manager struct {
|
||||
knobs *controlknobs.Knobs // or nil
|
||||
goos string // if empty, gets set to runtime.GOOS
|
||||
|
||||
QueryResponseMapper func(bs []byte) []byte
|
||||
|
||||
mu sync.Mutex // guards following
|
||||
config *Config // Tracks the last viable DNS configuration set by Set. nil on failures other than compilation failures or if set has never been called.
|
||||
}
|
||||
@ -465,7 +467,15 @@ func (m *Manager) Query(ctx context.Context, bs []byte, family string, from neti
|
||||
return nil, errFullQueue
|
||||
}
|
||||
defer atomic.AddInt32(&m.activeQueriesAtomic, -1)
|
||||
return m.resolver.Query(ctx, bs, family, from)
|
||||
outbs, err := m.resolver.Query(ctx, bs, family, from)
|
||||
if err == nil && m.QueryResponseMapper != nil {
|
||||
outbs = m.QueryResponseMapper(outbs)
|
||||
}
|
||||
return outbs, err
|
||||
}
|
||||
|
||||
// fran is an empty wip placeholder; it has no callers in the visible code and
// presumably should be removed before this lands — TODO confirm.
func (m *Manager) fran() {

}
|
||||
|
||||
const (
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user