diff --git a/net/art/table_test.go b/net/art/table_test.go
index 7277a5a90..194705138 100644
--- a/net/art/table_test.go
+++ b/net/art/table_test.go
@@ -5,6 +5,7 @@ package art
 import (
 	crand "crypto/rand"
+	"encoding/binary"
 	"fmt"
 	"math/rand"
 	"net/netip"
@@ -578,6 +579,269 @@ func TestDelete(t *testing.T) {
 	})
 }
 
+// tableInstr is an instruction to perform on a routing table.
+//
+// It has methods for converting to and from a byte sequence that have
+// been designed such that small random perturbations in the []byte
+// translate into small perturbations in the instructions, i.e. to give a
+// directed fuzzer a good handle on the table interface.
+//
+// The wire format is:
+//
+//	type tableInstrWire struct {
+//		action byte      // [0,85] Insert, [86,170] Get, [171,255] Delete
+//		v4orv6 byte      // < 128 IPv4, >= 128 IPv6
+//		bits   uint8     // prefix length, scaled: (bits+1)/8 for IPv4 [0,32], (bits+1)/2 for IPv6 [0,128]
+//		_      uint8     // make it a power of 2 at least
+//		addr   [16]uint8 // ip address (IPv4 occupies the first four bytes)
+//		val    uint64    // table value to store on insert (ignored on get/delete)
+//	}
+type tableInstr struct {
+	action instrAction
+	prefix netip.Prefix
+	value  uint64
+}
+
+// instrAction selects which table operation a tableInstr performs.
+type instrAction int
+
+const (
+	instrInsert instrAction = 0
+	instrGet    instrAction = 1
+	instrDelete instrAction = 2
+)
+
+// sizeofTableInstr is the encoded size of tableInstrWire in bytes.
+const sizeofTableInstr = 28
+
+// String renders instr in a human-readable form for failure logs.
+func (instr tableInstr) String() string {
+	switch instr.action {
+	case instrInsert:
+		return fmt.Sprintf("insert(%v, %v)", instr.prefix, instr.value)
+	case instrDelete:
+		return fmt.Sprintf("delete(%v)", instr.prefix)
+	case instrGet:
+		return fmt.Sprintf("get(%v)", instr.prefix.Addr())
+	}
+	return fmt.Sprintf("unknown-instr(%v, %v, %v)", instr.action, instr.prefix, instr.value)
+}
+
+// load decodes instr from b, which holds the wire format described
+// on tableInstr.
+func (instr *tableInstr) load(b [sizeofTableInstr]byte) {
+	if action := b[0]; action <= 85 {
+		instr.action = instrInsert
+	} else if action <= 170 {
+		instr.action = instrGet
+	} else {
+		instr.action = instrDelete
+	}
+	v4orv6 := b[1]
+	bits := int(b[2])
+	addrBytes := b[4:20]
+	instr.value = binary.BigEndian.Uint64(b[20:])
+
+	// Scale by (bits+1)/N rather than bits/N so that full-length
+	// prefixes are reachable: 255 maps to /32 (IPv4) and /128 (IPv6).
+	if v4orv6 < 128 {
+		addr := netip.AddrFrom4([4]byte(addrBytes[:4]))
+		instr.prefix = netip.PrefixFrom(addr, (bits+1)/8)
+	} else {
+		addr := netip.AddrFrom16([16]byte(addrBytes))
+		instr.prefix = netip.PrefixFrom(addr, (bits+1)/2)
+	}
+}
+
+// store encodes instr into b, the inverse of load. b must be at
+// least sizeofTableInstr bytes long. It takes a slice rather than an
+// array value so that the writes are visible to the caller.
+func (instr tableInstr) store(b []byte) {
+	switch instr.action {
+	case instrInsert:
+		b[0] = 0
+	case instrGet:
+		b[0] = 86
+	case instrDelete:
+		b[0] = 171
+	}
+	// Invert load's (bits+1)/N scaling: /0 encodes as 0, /n as n*N-1.
+	// (Encoding /n as n*N would overflow a byte for /32 and /128.)
+	bits := instr.prefix.Bits()
+	if instr.prefix.Addr().Is4() {
+		b[1] = 0
+		if bits > 0 {
+			b[2] = byte(bits*8 - 1)
+		} else {
+			b[2] = 0
+		}
+	} else {
+		b[1] = 129
+		if bits > 0 {
+			b[2] = byte(bits*2 - 1)
+		} else {
+			b[2] = 0
+		}
+	}
+	b[3] = 0 // padding
+	// load reads an IPv4 address from the first four bytes, so store
+	// it there (As16 would put it at offsets 12-15 instead).
+	var a16 [16]byte
+	if addr := instr.prefix.Addr(); addr.Is4() {
+		a4 := addr.As4()
+		copy(a16[:4], a4[:])
+	} else {
+		a16 = addr.As16()
+	}
+	copy(b[4:20], a16[:])
+	binary.BigEndian.PutUint64(b[20:], instr.value)
+}
+
+// FuzzInsertDelete drives Table and slowPrefixTable with the same
+// instruction stream and fails on any disagreement between them.
+func FuzzInsertDelete(f *testing.F) {
+	p := netip.MustParsePrefix
+	testcases := [][]tableInstr{
+		// TestRegression/prefixes_aligned_on_stride_boundary.
+		// TODO: should we deduplicate these somehow? we are effectively running
+		// these regression tests twice.
+		{
+			{instrInsert, p("226.205.197.0/24"), 1},
+			{instrInsert, p("226.205.0.0/16"), 2},
+			{instrGet, p("226.205.121.152/32"), 0},
+		},
+		// TestRegression/parent_prefix_inserted_in_different_orders
+		{
+			{instrInsert, p("136.20.0.0/16"), 1},
+			{instrInsert, p("136.20.201.62/32"), 2},
+			{instrGet, p("136.20.54.139/32"), 0},
+		},
+		// Non-canonical prefixes in slowPrefixTable (found by fuzzing).
+		{
+			{instrInsert, p("48.48.48.48/3"), 1},
+			{instrDelete, p("48.48.48.49/3"), 2},
+			{instrGet, p("48.48.48.48/32"), 0},
+		},
+		// Value not updated in slowPrefixTable (found by fuzzing).
+		{
+			{instrInsert, p("1.2.1.1/3"), 1},
+			{instrInsert, p("1.2.1.1/3"), 2},
+			{instrGet, p("1.2.1.1/32"), 0},
+		},
+		// Arbitrary test data generated by hand, to get an IPv6 address.
+		{
+			{instrInsert, p("ff:aaaa::1/128"), 1},
+			{instrInsert, p("10.0.1.1/24"), 4010},
+			{instrInsert, p("10.0.2.1/24"), 4011},
+			{instrInsert, p("10.0.3.1/24"), 4012},
+			{instrInsert, p("10.0.4.1/24"), 4013},
+			{instrInsert, p("10.0.5.1/24"), 4014},
+			{instrInsert, p("10.0.6.1/24"), 4015},
+			{instrInsert, p("10.0.7.1/24"), 4016},
+			{instrInsert, p("10.0.8.1/24"), 4017},
+			{instrInsert, p("10.0.250.1/24"), 8010},
+			{instrInsert, p("10.0.251.1/24"), 8011},
+			{instrInsert, p("10.0.252.1/24"), 8012},
+			{instrGet, p("ff:aaaa::1/128"), 0},
+			{instrGet, p("ff:aaab::3:1/128"), 0},
+			{instrGet, p("10.0.1.55/24"), 0},
+			{instrGet, p("10.0.2.55/24"), 0},
+			{instrGet, p("10.0.3.55/24"), 0},
+			{instrGet, p("10.0.4.55/24"), 0},
+			{instrGet, p("10.0.5.55/24"), 0},
+			{instrGet, p("10.0.6.55/24"), 0},
+			{instrGet, p("10.0.7.55/24"), 0},
+			{instrGet, p("10.0.9.55/24"), 0},
+			{instrGet, p("10.0.100.55/24"), 0},
+			{instrGet, p("10.0.250.55/24"), 0},
+			{instrGet, p("10.0.255.55/24"), 0},
+		},
+		// TODO more
+	}
+	for _, tc := range testcases {
+		b := make([]byte, sizeofTableInstr*len(tc))
+		for i, instr := range tc {
+			// store takes a slice so the encoded bytes land in b.
+			instr.store(b[i*sizeofTableInstr:])
+		}
+		f.Add(b)
+	}
+	printSeq := func(t *testing.T, tc []tableInstr) {
+		t.Logf("sequence (%d steps):", len(tc))
+		for _, step := range tc {
+			t.Logf("\t%s", step)
+		}
+	}
+	f.Fuzz(func(t *testing.T, b []byte) {
+		tc := make([]tableInstr, len(b)/sizeofTableInstr)
+		for i := range tc {
+			tc[i].load([sizeofTableInstr]byte(b[i*sizeofTableInstr:]))
+		}
+
+		t1 := &Table[uint64]{}
+		t2 := &slowPrefixTable[uint64]{}
+
+		// Intern values to make the pointers in the table useful.
+		// (Otherwise each insert instruction the fuzzer generates gets what
+		// is effectively a new value, so they cannot be reused.)
+		vals := make(map[uint64]*uint64)
+
+		for i, instr := range tc {
+			switch instr.action {
+			case instrInsert:
+				val, found := vals[instr.value]
+				if !found {
+					val = new(uint64)
+					*val = instr.value
+					vals[*val] = val
+				}
+				t1.Insert(instr.prefix, val)
+				t2.insert(instr.prefix, val)
+			case instrDelete:
+				t1.Delete(instr.prefix)
+				t2.delete(instr.prefix)
+			case instrGet:
+				got := t1.Get(instr.prefix.Addr())
+				want := t2.get(instr.prefix.Addr())
+				if got != want {
+					printSeq(t, tc[:i+1])
+					var gotVal, wantVal uint64
+					if got != nil {
+						gotVal = *got
+					}
+					if want != nil {
+						wantVal = *want
+					}
+					t.Errorf("get(%q) = %p (%d), want %p (%d)", instr.prefix.Addr(), got, gotVal, want, wantVal)
+					return
+				}
+			}
+		}
+
+		// TODO: do another pass by copying tc and shuffling each contiguous sequence of instrInsert entries.
+		// build one table with the original tc and compare to a second table built with the shuffled tc
+		// and compare for equivalence.
+	})
+}
+
+func TestJunk(t *testing.T) { // TODO delete, double checking a result from the fuzz test
+	t1 := &Table[uint64]{}
+	t2 := &slowPrefixTable[uint64]{}
+
+	p := netip.MustParsePrefix
+	val1 := uint64(3472328296227680304)
+	val2 := uint64(3472328296227680305)
+	t1.Insert(p("48.48.48.48/0"), &val1)
+	t2.insert(p("48.48.48.48/0"), &val1)
+	t1.Insert(p("48.48.48.48/0"), &val2)
+	t2.insert(p("48.48.48.48/0"), &val2)
+	got := t1.Get(netip.MustParseAddr("48.48.48.48"))
+	want := t2.get(netip.MustParseAddr("48.48.48.48"))
+	if got != want {
+		t.Errorf("got=%p, want=%p", got, want)
+	}
+}
+
 func TestInsertCompare(t *testing.T) {
 	// Create large route tables repeatedly, and compare Table's
 	// behavior to a naive and slow but correct implementation.
@@ -1086,6 +1321,7 @@ type slowPrefixEntry[T any] struct {
 }
 
 func (t *slowPrefixTable[T]) delete(pfx netip.Prefix) {
+	pfx = pfx.Masked()
 	ret := make([]slowPrefixEntry[T], 0, len(t.prefixes))
 	for _, ent := range t.prefixes {
 		if ent.pfx == pfx {
@@ -1097,9 +1333,10 @@ func (t *slowPrefixTable[T]) delete(pfx netip.Prefix) {
 }
 
 func (t *slowPrefixTable[T]) insert(pfx netip.Prefix, val *T) {
-	for _, ent := range t.prefixes {
+	pfx = pfx.Masked()
+	for i, ent := range t.prefixes {
 		if ent.pfx == pfx {
-			ent.val = val
+			t.prefixes[i].val = val
 			return
 		}
 	}