dns/types_generate.go

//+build ignore

// types_generate.go is meant to run with go generate. It will use
// go/{importer,types} to track down all the RR struct types. Then for each type
// it will generate conversion tables (TypeToRR and TypeToString) and banal
// methods (len, Header, copy) based on the struct tags. The generated source is
// written to ztypes.go, and is meant to be checked into git.
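//
// It is typically invoked through a go:generate directive somewhere in the
// dns package sources (the exact file carrying the directive is an assumption
// here), for example:
//
//	//go:generate go run types_generate.go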
package main
import (
"bytes"
"fmt"
"go/format"
"go/importer"
"go/types"
"log"
"os"
"strings"
"text/template"
)
var skipLen = map[string]struct{}{
"NSEC": {},
"NSEC3": {},
"OPT": {},
"CSYNC": {},
}
var packageHdr = `
// Code generated by "go run types_generate.go"; DO NOT EDIT.
package dns
import (
"encoding/base64"
"net"
)
`
var TypeToRR = template.Must(template.New("TypeToRR").Parse(`
// TypeToRR is a map of constructors for each RR type.
var TypeToRR = map[uint16]func() RR{
{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) },
{{end}}{{end}} }
`))
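
// For illustration (a sketch, not the literal gofmt'd output), the TypeToRR
// template above expands in ztypes.go into entries such as:
//
//	// TypeToRR is a map of constructors for each RR type.
//	var TypeToRR = map[uint16]func() RR{
//		TypeA:    func() RR { return new(A) },
//		TypeAAAA: func() RR { return new(AAAA) },
//		// ... one entry per RR struct, except RFC3597 ...
//	}
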
var typeToString = template.Must(template.New("typeToString").Parse(`
// TypeToString is a map of strings for each RR type.
var TypeToString = map[uint16]string{
{{range .}}{{if ne . "NSAPPTR"}} Type{{.}}: "{{.}}",
{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR",
}
`))
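
// Same idea for TypeToString; the extra NSAPPTR handling exists because the
// presentation name "NSAP-PTR" contains a dash and cannot be derived from the
// Go identifier, so the template emits that one entry explicitly:
//
//	TypeNSAPPTR: "NSAP-PTR",
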
var headerFunc = template.Must(template.New("headerFunc").Parse(`
{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr }
{{end}}
`))
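
// headerFunc in turn emits one trivial accessor per type, e.g.:
//
//	func (rr *A) Header() *RR_Header { return &rr.Hdr }
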
// getTypeStruct will take a type and the package scope, and return the
// (innermost) struct if the type is considered an RR type (currently defined
// as those structs beginning with an RR_Header; this could be redefined as
// implementing the RR interface). The bool return value indicates whether
// embedded structs were resolved.
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
st, ok := t.Underlying().(*types.Struct)
if !ok {
return nil, false
}
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
return st, false
}
if st.Field(0).Anonymous() {
st, _ := getTypeStruct(st.Field(0).Type(), scope)
return st, true
}
return nil, false
}
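
// For example, types that are defined in the dns package as a bare embedding
// of another RR struct (SIG embedding RRSIG, KEY embedding DNSKEY) resolve to
// the embedded struct and report true; main below uses that flag to skip
// generating len and copy methods for them.
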
func main() {
// Import and type-check the package
pkg, err := importer.Default().Import("github.com/miekg/dns")
fatalIfErr(err)
scope := pkg.Scope()
// Collect constants like TypeX
var numberedTypes []string
for _, name := range scope.Names() {
o := scope.Lookup(name)
if o == nil || !o.Exported() {
continue
}
b, ok := o.Type().(*types.Basic)
if !ok || b.Kind() != types.Uint16 {
continue
}
if !strings.HasPrefix(o.Name(), "Type") {
continue
}
name := strings.TrimPrefix(o.Name(), "Type")
if name == "PrivateRR" {
continue
}
numberedTypes = append(numberedTypes, name)
}
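// numberedTypes now holds the bare names of all TypeXXX uint16 constants
// ("A", "AAAA", ..., "NSAPPTR", ...), including type numbers that have no
// corresponding Go struct.
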
// Collect actual types (*X)
var namedTypes []string
for _, name := range scope.Names() {
o := scope.Lookup(name)
if o == nil || !o.Exported() {
continue
}
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
continue
}
if name == "PrivateRR" {
continue
}
// Check if corresponding TypeX exists
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
log.Fatalf("Constant Type%s does not exist.", o.Name())
}
namedTypes = append(namedTypes, o.Name())
}
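// namedTypes now holds the names of the concrete RR structs. RFC3597, which
// carries the rdata of unknown record types, is the one struct that may exist
// without a matching TypeRFC3597 constant, as checked above.
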
b := &bytes.Buffer{}
b.WriteString(packageHdr)
// Generate TypeToRR
fatalIfErr(TypeToRR.Execute(b, namedTypes))
// Generate typeToString
fatalIfErr(typeToString.Execute(b, numberedTypes))
// Generate headerFunc
fatalIfErr(headerFunc.Execute(b, namedTypes))
// Generate len()
fmt.Fprint(b, "// len() functions\n")
for _, name := range namedTypes {
if _, ok := skipLen[name]; ok {
continue
}
o := scope.Lookup(name)
st, isEmbedded := getTypeStruct(o.Type(), scope)
if isEmbedded {
continue
}
fmt.Fprintf(b, "func (rr *%s) len(off int, compression map[string]struct{}) int {\n", name)
fmt.Fprintf(b, "l := rr.Hdr.len(off, compression)\n")
for i := 1; i < st.NumFields(); i++ {
o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) }
if _, ok := st.Field(i).Type().(*types.Slice); ok {
switch st.Tag(i) {
case `dns:"-"`:
// ignored
case `dns:"cdomain-name"`:
o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, true) }\n")
case `dns:"domain-name"`:
o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, false) }\n")
case `dns:"txt"`:
o("for _, x := range rr.%s { l += len(x) + 1 }\n")
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
continue
}
switch {
case st.Tag(i) == `dns:"-"`:
// ignored
case st.Tag(i) == `dns:"cdomain-name"`:
o("l += domainNameLen(rr.%s, off+l, compression, true)\n")
case st.Tag(i) == `dns:"domain-name"`:
o("l += domainNameLen(rr.%s, off+l, compression, false)\n")
case st.Tag(i) == `dns:"octet"`:
o("l += len(rr.%s)\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-base64`):
fallthrough
case st.Tag(i) == `dns:"base64"`:
o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-hex:`): // this has an extra field where the length is stored
o("l += len(rr.%s)/2\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`):
fallthrough
case st.Tag(i) == `dns:"hex"`:
o("l += len(rr.%s)/2 + 1\n")
case st.Tag(i) == `dns:"any"`:
o("l += len(rr.%s)\n")
case st.Tag(i) == `dns:"a"`:
o("l += net.IPv4len // %s\n")
case st.Tag(i) == `dns:"aaaa"`:
o("l += net.IPv6len // %s\n")
case st.Tag(i) == `dns:"txt"`:
o("for _, t := range rr.%s { l += len(t) + 1 }\n")
case st.Tag(i) == `dns:"uint48"`:
o("l += 6 // %s\n")
case st.Tag(i) == "":
switch st.Field(i).Type().(*types.Basic).Kind() {
case types.Uint8:
o("l++ // %s\n")
case types.Uint16:
o("l += 2 // %s\n")
case types.Uint32:
o("l += 4 // %s\n")
case types.Uint64:
o("l += 8 // %s\n")
case types.String:
o("l += len(rr.%s) + 1\n")
default:
log.Fatalln(name, st.Field(i).Name())
}
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
}
fmt.Fprintf(b, "return l }\n")
}
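
// Tying the cases above together: for a simple record such as MX (a uint16
// Preference plus a compressible domain name Mx), the generated method comes
// out roughly as (a sketch; gofmt decides the final layout):
//
//	func (rr *MX) len(off int, compression map[string]struct{}) int {
//		l := rr.Hdr.len(off, compression)
//		l += 2 // Preference
//		l += domainNameLen(rr.Mx, off+l, compression, true)
//		return l
//	}
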
// Generate copy()
fmt.Fprint(b, "// copy() functions\n")
for _, name := range namedTypes {
o := scope.Lookup(name)
st, isEmbedded := getTypeStruct(o.Type(), scope)
if isEmbedded {
continue
}
fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name)
fields := []string{"rr.Hdr"}
for i := 1; i < st.NumFields(); i++ {
f := st.Field(i).Name()
if sl, ok := st.Field(i).Type().(*types.Slice); ok {
t := sl.Underlying().String()
t = strings.TrimPrefix(t, "[]")
if strings.Contains(t, ".") {
splits := strings.Split(t, ".")
t = splits[len(splits)-1]
}
fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n",
f, t, f, f, f)
fields = append(fields, f)
continue
}
if st.Field(i).Type().String() == "net.IP" {
fields = append(fields, "copyIP(rr."+f+")")
continue
}
fields = append(fields, "rr."+f)
}
fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ","))
fmt.Fprintf(b, "}\n")
}
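
// For the same MX example, copy comes out as a plain struct literal; slice
// fields get a fresh backing array via make+copy, net.IP fields go through
// copyIP, and everything else is copied by value:
//
//	func (rr *MX) copy() RR {
//		return &MX{rr.Hdr, rr.Preference, rr.Mx}
//	}
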
// gofmt
res, err := format.Source(b.Bytes())
if err != nil {
b.WriteTo(os.Stderr)
log.Fatal(err)
}
// write result
f, err := os.Create("ztypes.go")
fatalIfErr(err)
defer f.Close()
f.Write(res)
}
func fatalIfErr(err error) {
if err != nil {
log.Fatal(err)
}
}