Fixed len computation when size just goes beyond 14 bits (#668)

* Fixed len computation when size just goes beyond 14 bits

* Added bounds checks around the 14-bit boundary

* Len() is always right, including around 14-bit boundaries (see the sketch below)

* Avoid splitting into labels when not applicable

* Fixed comments

* Added comments in code

* Added new test cases

* Fixed computation of Len() for SRV and all kinds of records

* Fixed Sign(), which was relying on non-copy behaviour in unit tests

* Removed unused padding

* Fixed typo in PackBuffer() function

* Added comment about packBufferWithCompressionMap() for testing purposes
Pierre Souchay 2018-05-16 08:20:13 +02:00 committed by Miek Gieben
parent 621df0907e
commit 09649115c1
4 changed files with 394 additions and 118 deletions
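
The bullets above all follow from one constraint of the DNS wire format: a compression pointer is two bytes whose top two bits are set, leaving 14 bits for the offset, so only names that start before offset 16384 (the package's maxCompressionOffset) can ever be the target of a pointer. Once a message grows past that offset the packer stops recording new compression targets, and the old Len() bookkeeping did not track exactly which names fell below that offset, so its prediction drifted from the real packed size. A minimal sketch of the limit, using hypothetical helpers that are not part of the package:

    // Illustration only: the 14-bit limit on DNS compression pointers.
    const maxCompressionOffset = 1 << 14 // 16384

    // canBeCompressionTarget reports whether a name stored at offset off
    // could later be referenced by a 2-byte compression pointer.
    func canBeCompressionTarget(off int) bool { return off < maxCompressionOffset }

    // packPointer shows the wire form: 0b11 in the top two bits, then the 14-bit offset.
    func packPointer(off int) [2]byte { return [2]byte{0xC0 | byte(off>>8), byte(off)} }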


@@ -101,7 +101,8 @@ Names:
// compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names
fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR) {\n")
fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR, initLen int) int {\n")
fmt.Fprint(b, "currentLen := initLen\n")
fmt.Fprint(b, "switch x := r.(type) {\n")
for _, name := range domainTypes {
o := scope.Lookup(name)
@@ -109,7 +110,10 @@ Names:
fmt.Fprintf(b, "case *%s:\n", name)
for i := 1; i < st.NumFields(); i++ {
out := func(s string) { fmt.Fprintf(b, "compressionLenHelper(c, x.%s)\n", st.Field(i).Name()) }
out := func(s string) {
fmt.Fprintf(b, "currentLen -= len(x.%s) + 1\n", st.Field(i).Name())
fmt.Fprintf(b, "currentLen += compressionLenHelper(c, x.%s, currentLen)\n", st.Field(i).Name())
}
if _, ok := st.Field(i).Type().(*types.Slice); ok {
switch st.Tag(i) {
@@ -118,8 +122,12 @@ Names:
case `dns:"cdomain-name"`:
// For HIP we need to slice over the elements in this slice.
fmt.Fprintf(b, `for i := range x.%s {
compressionLenHelper(c, x.%s[i])
}
currentLen -= len(x.%s[i]) + 1
}
`, st.Field(i).Name(), st.Field(i).Name())
fmt.Fprintf(b, `for i := range x.%s {
currentLen += compressionLenHelper(c, x.%s[i], currentLen)
}
`, st.Field(i).Name(), st.Field(i).Name())
}
continue
@@ -133,11 +141,11 @@ Names:
}
}
}
fmt.Fprintln(b, "}\n}\n\n")
fmt.Fprintln(b, "}\nreturn currentLen - initLen\n}\n\n")
// compressionLenSearchType - search cdomain-tags types for compressible names.
fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool) {\n")
fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool, int) {\n")
fmt.Fprint(b, "switch x := r.(type) {\n")
for _, name := range cdomainTypes {
o := scope.Lookup(name)
@@ -147,7 +155,7 @@ Names:
j := 1
for i := 1; i < st.NumFields(); i++ {
out := func(s string, j int) {
fmt.Fprintf(b, "k%d, ok%d := compressionLenSearch(c, x.%s)\n", j, j, st.Field(i).Name())
fmt.Fprintf(b, "k%d, ok%d, sz%d := compressionLenSearch(c, x.%s)\n", j, j, j, st.Field(i).Name())
}
// There are no slice types with names that can be compressed.
@@ -160,13 +168,15 @@ Names:
}
k := "k1"
ok := "ok1"
sz := "sz1"
for i := 2; i < j; i++ {
k += fmt.Sprintf(" + k%d", i)
ok += fmt.Sprintf(" && ok%d", i)
sz += fmt.Sprintf(" + sz%d", i)
}
fmt.Fprintf(b, "return %s, %s\n", k, ok)
fmt.Fprintf(b, "return %s, %s, %s\n", k, ok, sz)
}
fmt.Fprintln(b, "}\nreturn 0, false\n}\n\n")
fmt.Fprintln(b, "}\nreturn 0, false, 0\n}\n\n")
// gofmt
res, err := format.Source(b.Bytes())
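
The net effect of the template change above is that the generated helper threads the running offset through and returns how many bytes the record's compressible names add. As a rough, self-contained sketch of the shape it emits, restricted to a single type (the name compressionLenHelperTypeMXOnly is hypothetical; the real generated function at the end of this diff covers every type with a compressible name):

    // Sketch of the emitted code, reduced to the *MX case.
    func compressionLenHelperTypeMXOnly(c map[string]int, r RR, initLen int) int {
        currentLen := initLen
        switch x := r.(type) {
        case *MX:
            currentLen -= len(x.Mx) + 1
            currentLen += compressionLenHelper(c, x.Mx, currentLen)
        }
        return currentLen - initLen
    }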


@@ -4,6 +4,8 @@ import (
"encoding/hex"
"fmt"
"net"
"reflect"
"strings"
"testing"
)
@@ -52,6 +54,7 @@ func TestMsgCompressLength(t *testing.T) {
func TestMsgLength(t *testing.T) {
makeMsg := func(question string, ans, ns, e []RR) *Msg {
msg := new(Msg)
msg.Compress = true
msg.SetQuestion(Fqdn(question), TypeANY)
msg.Answer = append(msg.Answer, ans...)
msg.Ns = append(msg.Ns, ns...)
@@ -79,6 +82,92 @@ func TestMsgLength(t *testing.T) {
}
}
func TestCompressionLenHelper(t *testing.T) {
c := make(map[string]int)
compressionLenHelper(c, "example.com", 12)
if c["example.com"] != 12 {
t.Errorf("bad %d", c["example.com"])
}
if c["com"] != 20 {
t.Errorf("bad %d", c["com"])
}
// Test boundaries
c = make(map[string]int)
// foo label starts at 16379
// com label starts at 16384
compressionLenHelper(c, "foo.com", 16379)
if c["foo.com"] != 16379 {
t.Errorf("bad %d", c["foo.com"])
}
// com label is accessible
if c["com"] != 16383 {
t.Errorf("bad %d", c["com"])
}
c = make(map[string]int)
// foo label starts at 16379
// com label starts at 16385 => outside range
compressionLenHelper(c, "foo.com", 16380)
if c["foo.com"] != 16380 {
t.Errorf("bad %d", c["foo.com"])
}
// com label is NOT accessible
if c["com"] != 0 {
t.Errorf("bad %d", c["com"])
}
c = make(map[string]int)
compressionLenHelper(c, "example.com", 16375)
if c["example.com"] != 16375 {
t.Errorf("bad %d", c["example.com"])
}
// com label starts at 16383, still within the 14-bit range
if c["com"] != 16383 {
t.Errorf("bad %d", c["com"])
}
c = make(map[string]int)
compressionLenHelper(c, "example.com", 16376)
if c["example.com"] != 16376 {
t.Errorf("bad %d", c["example.com"])
}
// com starts AFTER 16384
if c["com"] != 0 {
t.Errorf("bad %d", c["com"])
}
}
func TestCompressionLenSearch(t *testing.T) {
c := make(map[string]int)
compressed, ok, fullSize := compressionLenSearch(c, "a.b.org.")
if compressed != 0 || ok || fullSize != 14 {
panic(fmt.Errorf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize))
}
c["org."] = 3
compressed, ok, fullSize = compressionLenSearch(c, "a.b.org.")
if compressed != 4 || !ok || fullSize != 8 {
panic(fmt.Errorf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize))
}
c["b.org."] = 5
compressed, ok, fullSize = compressionLenSearch(c, "a.b.org.")
if compressed != 6 || !ok || fullSize != 4 {
panic(fmt.Errorf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize))
}
// A longer entry that does not match the name is ignored
c["x.b.org."] = 5
compressed, ok, fullSize = compressionLenSearch(c, "a.b.org.")
if compressed != 6 || !ok || fullSize != 4 {
panic(fmt.Errorf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize))
}
// A longer entry that matches the full name is used
c["a.b.org."] = 5
compressed, ok, fullSize = compressionLenSearch(c, "a.b.org.")
if compressed != 8 || !ok || fullSize != 0 {
panic(fmt.Errorf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize))
}
}
func TestMsgLength2(t *testing.T) {
// Serialized replies
var testMessages = []string{
@@ -159,7 +248,7 @@ func TestMsgCompressLengthLargeRecords(t *testing.T) {
msg.SetQuestion("my.service.acme.", TypeSRV)
j := 1
for i := 0; i < 250; i++ {
target := fmt.Sprintf("host-redis-%d-%d.test.acme.com.node.dc1.consul.", j, i)
target := fmt.Sprintf("host-redis-1-%d.test.acme.com.node.dc1.consul.", i)
msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "redis.service.consul.", Class: 1, Rrtype: TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target})
msg.Extra = append(msg.Extra, &CNAME{Hdr: RR_Header{Name: target, Class: 1, Rrtype: TypeCNAME, Ttl: 0x3c}, Target: fmt.Sprintf("fx.168.%d.%d.", j, i)})
}
@@ -172,3 +261,111 @@ func TestMsgCompressLengthLargeRecords(t *testing.T) {
t.Fatalf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", msg.Question[0].Name, len(msg.Answer), predicted, len(buf))
}
}
func TestCompareCompressionMapsForANY(t *testing.T) {
msg := new(Msg)
msg.Compress = true
msg.SetQuestion("a.service.acme.", TypeANY)
// Be sure to go beyond the 14-bit offset range (16384 bytes)
for i := 0; i < 2000; i++ {
target := fmt.Sprintf("host.app-%d.x%d.test.acme.", i%250, i)
msg.Answer = append(msg.Answer, &AAAA{Hdr: RR_Header{Name: target, Rrtype: TypeAAAA, Class: ClassINET, Ttl: 0x3c}, AAAA: net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, byte(i / 255), byte(i % 255)}})
msg.Answer = append(msg.Answer, &A{Hdr: RR_Header{Name: target, Rrtype: TypeA, Class: ClassINET, Ttl: 0x3c}, A: net.IP{127, 0, byte(i / 255), byte(i % 255)}})
if msg.Len() > 16384 {
break
}
}
for labelSize := 0; labelSize < 63; labelSize++ {
msg.SetQuestion(fmt.Sprintf("a%s.service.acme.", strings.Repeat("x", labelSize)), TypeANY)
compressionFake := make(map[string]int)
lenFake := compressedLenWithCompressionMap(msg, compressionFake)
compressionReal := make(map[string]int)
buf, err := msg.packBufferWithCompressionMap(nil, compressionReal)
if err != nil {
t.Fatal(err)
}
if lenFake != len(buf) {
t.Fatalf("padding= %d ; Predicted len := %d != real:= %d", labelSize, lenFake, len(buf))
}
if !reflect.DeepEqual(compressionFake, compressionReal) {
t.Fatalf("padding= %d ; Fake Compression Map != Real Compression Map\n*** Real:= %v\n\n***Fake:= %v", labelSize, compressionReal, compressionFake)
}
}
}
func TestCompareCompressionMapsForSRV(t *testing.T) {
msg := new(Msg)
msg.Compress = true
msg.SetQuestion("a.service.acme.", TypeSRV)
// Be sure to go beyond the 14-bit offset range (16384 bytes)
for i := 0; i < 2000; i++ {
target := fmt.Sprintf("host.app-%d.x%d.test.acme.", i%250, i)
msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "redis.service.consul.", Class: ClassINET, Rrtype: TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target})
msg.Extra = append(msg.Extra, &A{Hdr: RR_Header{Name: target, Rrtype: TypeA, Class: ClassINET, Ttl: 0x3c}, A: net.IP{127, 0, byte(i / 255), byte(i % 255)}})
if msg.Len() > 16384 {
break
}
}
for labelSize := 0; labelSize < 63; labelSize++ {
msg.SetQuestion(fmt.Sprintf("a%s.service.acme.", strings.Repeat("x", labelSize)), TypeAAAA)
compressionFake := make(map[string]int)
lenFake := compressedLenWithCompressionMap(msg, compressionFake)
compressionReal := make(map[string]int)
buf, err := msg.packBufferWithCompressionMap(nil, compressionReal)
if err != nil {
t.Fatal(err)
}
if lenFake != len(buf) {
t.Fatalf("padding= %d ; Predicted len := %d != real:= %d", labelSize, lenFake, len(buf))
}
if !reflect.DeepEqual(compressionFake, compressionReal) {
t.Fatalf("padding= %d ; Fake Compression Map != Real Compression Map\n*** Real:= %v\n\n***Fake:= %v", labelSize, compressionReal, compressionFake)
}
}
}
func TestMsgCompressLengthLargeRecordsWithPaddingPermutation(t *testing.T) {
msg := new(Msg)
msg.Compress = true
msg.SetQuestion("my.service.acme.", TypeSRV)
for i := 0; i < 250; i++ {
target := fmt.Sprintf("host-redis-x-%d.test.acme.com.node.dc1.consul.", i)
msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "redis.service.consul.", Class: 1, Rrtype: TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target})
msg.Extra = append(msg.Extra, &CNAME{Hdr: RR_Header{Name: target, Class: ClassINET, Rrtype: TypeCNAME, Ttl: 0x3c}, Target: fmt.Sprintf("fx.168.x.%d.", i)})
}
for labelSize := 1; labelSize < 63; labelSize++ {
msg.SetQuestion(fmt.Sprintf("my.%s.service.acme.", strings.Repeat("x", labelSize)), TypeSRV)
predicted := msg.Len()
buf, err := msg.Pack()
if err != nil {
t.Error(err)
}
if predicted != len(buf) {
t.Fatalf("padding= %d ; predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", labelSize, msg.Question[0].Name, len(msg.Answer), predicted, len(buf))
}
}
}
func TestMsgCompressLengthLargeRecordsAllValues(t *testing.T) {
msg := new(Msg)
msg.Compress = true
msg.SetQuestion("redis.service.consul.", TypeSRV)
for i := 0; i < 900; i++ {
target := fmt.Sprintf("host-redis-%d-%d.test.acme.com.node.dc1.consul.", i/256, i%256)
msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "redis.service.consul.", Class: 1, Rrtype: TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target})
msg.Extra = append(msg.Extra, &CNAME{Hdr: RR_Header{Name: target, Class: ClassINET, Rrtype: TypeCNAME, Ttl: 0x3c}, Target: fmt.Sprintf("fx.168.%d.%d.", i/256, i%256)})
predicted := msg.Len()
buf, err := msg.Pack()
if err != nil {
t.Error(err)
}
if predicted != len(buf) {
t.Fatalf("predicted compressed length is wrong for %d records: predicted %s (len=%d) %d, actual %d", i, msg.Question[0].Name, len(msg.Answer), predicted, len(buf))
}
}
}
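
All of the tests above check the invariant this commit restores: for a compressed message, msg.Len() must equal the length of the buffer produced by msg.Pack(), even when the message crosses the 14-bit offset boundary. A standalone sketch of that check against the public API (record names and counts are arbitrary):

    package main

    import (
        "fmt"

        "github.com/miekg/dns"
    )

    func main() {
        msg := new(dns.Msg)
        msg.Compress = true
        msg.SetQuestion("redis.service.consul.", dns.TypeSRV)
        // Add enough records to push the packed size past the 14-bit offset boundary.
        for i := 0; i < 900; i++ {
            msg.Answer = append(msg.Answer, &dns.SRV{
                Hdr:    dns.RR_Header{Name: "redis.service.consul.", Rrtype: dns.TypeSRV, Class: dns.ClassINET, Ttl: 60},
                Port:   6379,
                Target: fmt.Sprintf("host-%d.node.dc1.consul.", i),
            })
        }
        predicted := msg.Len()
        buf, err := msg.Pack()
        if err != nil {
            panic(err)
        }
        fmt.Printf("predicted=%d actual=%d match=%v\n", predicted, len(buf), predicted == len(buf))
    }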

msg.go

@@ -694,15 +694,19 @@ func (dns *Msg) Pack() (msg []byte, err error) {
// PackBuffer packs a Msg, using the given buffer buf. If buf is too small
// a new buffer is allocated.
func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
// We use a similar function in tsig.go's stripTsig.
var (
dh Header
compression map[string]int
)
var compression map[string]int
if dns.Compress {
compression = make(map[string]int) // Compression pointer mappings
}
return dns.packBufferWithCompressionMap(buf, compression)
}
// packBufferWithCompressionMap is ONLY for testing purposes
func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression map[string]int) (msg []byte, err error) {
// We use a similar function in tsig.go's stripTsig.
var (
dh Header
)
if dns.Rcode < 0 || dns.Rcode > 0xFFF {
return nil, ErrRcode
@@ -921,23 +925,27 @@ func (dns *Msg) String() string {
// than packing it, measuring the size and discarding the buffer.
func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) }
func compressedLenWithCompressionMap(dns *Msg, compression map[string]int) int {
l := 12 // Message header is always 12 bytes
for _, r := range dns.Question {
compressionLenHelper(compression, r.Name, l)
l += r.len()
}
l += compressionLenSlice(l, compression, dns.Answer)
l += compressionLenSlice(l, compression, dns.Ns)
l += compressionLenSlice(l, compression, dns.Extra)
return l
}
// compressedLen returns the message length when in compressed wire format
// when compress is true, otherwise the uncompressed length is returned.
func compressedLen(dns *Msg, compress bool) int {
// We always return one more than needed.
l := 12 // Message header is always 12 bytes
if compress {
compression := map[string]int{}
for _, r := range dns.Question {
l += r.len()
compressionLenHelper(compression, r.Name)
}
l += compressionLenSlice(l, compression, dns.Answer)
l += compressionLenSlice(l, compression, dns.Ns)
l += compressionLenSlice(l, compression, dns.Extra)
return l
return compressedLenWithCompressionMap(dns, compression)
}
l := 12 // Message header is always 12 bytes
for _, r := range dns.Question {
l += r.len()
@@ -961,70 +969,94 @@ func compressedLen(dns *Msg, compress bool) int {
return l
}
func compressionLenSlice(len int, c map[string]int, rs []RR) int {
var l int
func compressionLenSlice(lenp int, c map[string]int, rs []RR) int {
initLen := lenp
for _, r := range rs {
if r == nil {
continue
}
// track this length, and the global length in len, while taking compression into account for both.
// tmpLen tracks the length of this record across 14-bit boundaries
tmpLen := lenp
x := r.len()
l += x
len += x
k, ok := compressionLenSearch(c, r.Header().Name)
// track this length, and the global length in len, while taking compression into account for both.
k, ok, _ := compressionLenSearch(c, r.Header().Name)
if ok {
l += 1 - k
len += 1 - k
// Size of x is reduced by k, but we add 1 since k includes the '.' and the label descriptor takes 2 bytes,
// so basically x = x - k - 1 + 2
x += 1 - k
}
if len < maxCompressionOffset {
compressionLenHelper(c, r.Header().Name)
}
k, ok = compressionLenSearchType(c, r)
tmpLen += compressionLenHelper(c, r.Header().Name, tmpLen)
k, ok, _ = compressionLenSearchType(c, r)
if ok {
l += 1 - k
len += 1 - k
x += 1 - k
}
lenp += x
tmpLen = lenp
tmpLen += compressionLenHelperType(c, r, tmpLen)
if len < maxCompressionOffset {
compressionLenHelperType(c, r)
}
}
return l
return lenp - initLen
}
// Put the parts of the name in the compression map.
func compressionLenHelper(c map[string]int, s string) {
// Put the parts of the name in the compression map; return the number of bytes added to the payload
func compressionLenHelper(c map[string]int, s string, currentLen int) int {
if currentLen > maxCompressionOffset {
// We won't be able to add any label that could be re-used later anyway
return 0
}
if _, ok := c[s]; ok {
return 0
}
initLen := currentLen
pref := ""
prev := s
lbs := Split(s)
for j := len(lbs) - 1; j >= 0; j-- {
for j := 0; j < len(lbs); j++ {
pref = s[lbs[j]:]
currentLen += len(prev) - len(pref)
prev = pref
if _, ok := c[pref]; !ok {
c[pref] = len(pref)
// If the label starts within the 14-bit offset range, it might be re-used later
if currentLen < maxCompressionOffset {
c[pref] = currentLen
}
} else {
added := currentLen - initLen
if j > 0 {
// We added a new PTR
added += 2
}
return added
}
}
return currentLen - initLen
}
// Look for each part in the compression map and returns its length,
// keep on searching so we get the longest match.
func compressionLenSearch(c map[string]int, s string) (int, bool) {
// Will return the size of the compression found, whether a match has been
// found, and the size of the record if added to the payload
func compressionLenSearch(c map[string]int, s string) (int, bool, int) {
off := 0
end := false
if s == "" { // don't bork on bogus data
return 0, false
return 0, false, 0
}
fullSize := 0
for {
if _, ok := c[s[off:]]; ok {
return len(s[off:]), true
return len(s[off:]), true, fullSize + off
}
if end {
break
}
// Each label descriptor takes 2 bytes, add it
fullSize += 2
off, end = NextLabel(s, off)
}
return 0, false
return 0, false, fullSize + len(s)
}
// Copy returns a new RR which is a deep-copy of r.


@@ -2,117 +2,154 @@
package dns
func compressionLenHelperType(c map[string]int, r RR) {
func compressionLenHelperType(c map[string]int, r RR, initLen int) int {
currentLen := initLen
switch x := r.(type) {
case *AFSDB:
compressionLenHelper(c, x.Hostname)
currentLen -= len(x.Hostname) + 1
currentLen += compressionLenHelper(c, x.Hostname, currentLen)
case *CNAME:
compressionLenHelper(c, x.Target)
currentLen -= len(x.Target) + 1
currentLen += compressionLenHelper(c, x.Target, currentLen)
case *DNAME:
compressionLenHelper(c, x.Target)
currentLen -= len(x.Target) + 1
currentLen += compressionLenHelper(c, x.Target, currentLen)
case *HIP:
for i := range x.RendezvousServers {
compressionLenHelper(c, x.RendezvousServers[i])
currentLen -= len(x.RendezvousServers[i]) + 1
}
for i := range x.RendezvousServers {
currentLen += compressionLenHelper(c, x.RendezvousServers[i], currentLen)
}
case *KX:
compressionLenHelper(c, x.Exchanger)
currentLen -= len(x.Exchanger) + 1
currentLen += compressionLenHelper(c, x.Exchanger, currentLen)
case *LP:
compressionLenHelper(c, x.Fqdn)
currentLen -= len(x.Fqdn) + 1
currentLen += compressionLenHelper(c, x.Fqdn, currentLen)
case *MB:
compressionLenHelper(c, x.Mb)
currentLen -= len(x.Mb) + 1
currentLen += compressionLenHelper(c, x.Mb, currentLen)
case *MD:
compressionLenHelper(c, x.Md)
currentLen -= len(x.Md) + 1
currentLen += compressionLenHelper(c, x.Md, currentLen)
case *MF:
compressionLenHelper(c, x.Mf)
currentLen -= len(x.Mf) + 1
currentLen += compressionLenHelper(c, x.Mf, currentLen)
case *MG:
compressionLenHelper(c, x.Mg)
currentLen -= len(x.Mg) + 1
currentLen += compressionLenHelper(c, x.Mg, currentLen)
case *MINFO:
compressionLenHelper(c, x.Rmail)
compressionLenHelper(c, x.Email)
currentLen -= len(x.Rmail) + 1
currentLen += compressionLenHelper(c, x.Rmail, currentLen)
currentLen -= len(x.Email) + 1
currentLen += compressionLenHelper(c, x.Email, currentLen)
case *MR:
compressionLenHelper(c, x.Mr)
currentLen -= len(x.Mr) + 1
currentLen += compressionLenHelper(c, x.Mr, currentLen)
case *MX:
compressionLenHelper(c, x.Mx)
currentLen -= len(x.Mx) + 1
currentLen += compressionLenHelper(c, x.Mx, currentLen)
case *NAPTR:
compressionLenHelper(c, x.Replacement)
currentLen -= len(x.Replacement) + 1
currentLen += compressionLenHelper(c, x.Replacement, currentLen)
case *NS:
compressionLenHelper(c, x.Ns)
currentLen -= len(x.Ns) + 1
currentLen += compressionLenHelper(c, x.Ns, currentLen)
case *NSAPPTR:
compressionLenHelper(c, x.Ptr)
currentLen -= len(x.Ptr) + 1
currentLen += compressionLenHelper(c, x.Ptr, currentLen)
case *NSEC:
compressionLenHelper(c, x.NextDomain)
currentLen -= len(x.NextDomain) + 1
currentLen += compressionLenHelper(c, x.NextDomain, currentLen)
case *PTR:
compressionLenHelper(c, x.Ptr)
currentLen -= len(x.Ptr) + 1
currentLen += compressionLenHelper(c, x.Ptr, currentLen)
case *PX:
compressionLenHelper(c, x.Map822)
compressionLenHelper(c, x.Mapx400)
currentLen -= len(x.Map822) + 1
currentLen += compressionLenHelper(c, x.Map822, currentLen)
currentLen -= len(x.Mapx400) + 1
currentLen += compressionLenHelper(c, x.Mapx400, currentLen)
case *RP:
compressionLenHelper(c, x.Mbox)
compressionLenHelper(c, x.Txt)
currentLen -= len(x.Mbox) + 1
currentLen += compressionLenHelper(c, x.Mbox, currentLen)
currentLen -= len(x.Txt) + 1
currentLen += compressionLenHelper(c, x.Txt, currentLen)
case *RRSIG:
compressionLenHelper(c, x.SignerName)
currentLen -= len(x.SignerName) + 1
currentLen += compressionLenHelper(c, x.SignerName, currentLen)
case *RT:
compressionLenHelper(c, x.Host)
currentLen -= len(x.Host) + 1
currentLen += compressionLenHelper(c, x.Host, currentLen)
case *SIG:
compressionLenHelper(c, x.SignerName)
currentLen -= len(x.SignerName) + 1
currentLen += compressionLenHelper(c, x.SignerName, currentLen)
case *SOA:
compressionLenHelper(c, x.Ns)
compressionLenHelper(c, x.Mbox)
currentLen -= len(x.Ns) + 1
currentLen += compressionLenHelper(c, x.Ns, currentLen)
currentLen -= len(x.Mbox) + 1
currentLen += compressionLenHelper(c, x.Mbox, currentLen)
case *SRV:
compressionLenHelper(c, x.Target)
currentLen -= len(x.Target) + 1
currentLen += compressionLenHelper(c, x.Target, currentLen)
case *TALINK:
compressionLenHelper(c, x.PreviousName)
compressionLenHelper(c, x.NextName)
currentLen -= len(x.PreviousName) + 1
currentLen += compressionLenHelper(c, x.PreviousName, currentLen)
currentLen -= len(x.NextName) + 1
currentLen += compressionLenHelper(c, x.NextName, currentLen)
case *TKEY:
compressionLenHelper(c, x.Algorithm)
currentLen -= len(x.Algorithm) + 1
currentLen += compressionLenHelper(c, x.Algorithm, currentLen)
case *TSIG:
compressionLenHelper(c, x.Algorithm)
currentLen -= len(x.Algorithm) + 1
currentLen += compressionLenHelper(c, x.Algorithm, currentLen)
}
return currentLen - initLen
}
func compressionLenSearchType(c map[string]int, r RR) (int, bool) {
func compressionLenSearchType(c map[string]int, r RR) (int, bool, int) {
switch x := r.(type) {
case *AFSDB:
k1, ok1 := compressionLenSearch(c, x.Hostname)
return k1, ok1
k1, ok1, sz1 := compressionLenSearch(c, x.Hostname)
return k1, ok1, sz1
case *CNAME:
k1, ok1 := compressionLenSearch(c, x.Target)
return k1, ok1
k1, ok1, sz1 := compressionLenSearch(c, x.Target)
return k1, ok1, sz1
case *MB:
k1, ok1 := compressionLenSearch(c, x.Mb)
return k1, ok1
k1, ok1, sz1 := compressionLenSearch(c, x.Mb)
return k1, ok1, sz1
case *MD:
k1, ok1 := compressionLenSearch(c, x.Md)
return k1, ok1
k1, ok1, sz1 := compressionLenSearch(c, x.Md)
return k1, ok1, sz1
case *MF:
k1, ok1 := compressionLenSearch(c, x.Mf)
return k1, ok1
k1, ok1, sz1 := compressionLenSearch(c, x.Mf)
return k1, ok1, sz1
case *MG:
k1, ok1 := compressionLenSearch(c, x.Mg)
return k1, ok1
k1, ok1, sz1 := compressionLenSearch(c, x.Mg)
return k1, ok1, sz1
case *MINFO:
k1, ok1 := compressionLenSearch(c, x.Rmail)
k2, ok2 := compressionLenSearch(c, x.Email)
return k1 + k2, ok1 && ok2
k1, ok1, sz1 := compressionLenSearch(c, x.Rmail)
k2, ok2, sz2 := compressionLenSearch(c, x.Email)
return k1 + k2, ok1 && ok2, sz1 + sz2
case *MR:
k1, ok1 := compressionLenSearch(c, x.Mr)
return k1, ok1
k1, ok1, sz1 := compressionLenSearch(c, x.Mr)
return k1, ok1, sz1
case *MX:
k1, ok1 := compressionLenSearch(c, x.Mx)
return k1, ok1
k1, ok1, sz1 := compressionLenSearch(c, x.Mx)
return k1, ok1, sz1
case *NS:
k1, ok1 := compressionLenSearch(c, x.Ns)
return k1, ok1
k1, ok1, sz1 := compressionLenSearch(c, x.Ns)
return k1, ok1, sz1
case *PTR:
k1, ok1 := compressionLenSearch(c, x.Ptr)
return k1, ok1
k1, ok1, sz1 := compressionLenSearch(c, x.Ptr)
return k1, ok1, sz1
case *RT:
k1, ok1 := compressionLenSearch(c, x.Host)
return k1, ok1
k1, ok1, sz1 := compressionLenSearch(c, x.Host)
return k1, ok1, sz1
case *SOA:
k1, ok1 := compressionLenSearch(c, x.Ns)
k2, ok2 := compressionLenSearch(c, x.Mbox)
return k1 + k2, ok1 && ok2
k1, ok1, sz1 := compressionLenSearch(c, x.Ns)
k2, ok2, sz2 := compressionLenSearch(c, x.Mbox)
return k1 + k2, ok1 && ok2, sz1 + sz2
}
return 0, false
return 0, false, 0
}