dns/privaterr.go

package dns
import (
"fmt"
"strings"
)
// PrivateRdata is an interface used for implementing "Private Use" RR types, see
// RFC 6895. This allows one to experiment with new RR types without requesting an
// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
type PrivateRdata interface {
// String returns the text presentation of the Rdata of the Private RR.
String() string
// Parse parses the Rdata of the private RR.
Parse([]string) error
// Pack is used when packing a private RR into a buffer.
Pack([]byte) (int, error)
// Unpack is used when unpacking a private RR from a buffer.
// TODO(miek): diff. signature than Pack, see edns0.go for instance.
Unpack([]byte) (int, error)
// Copy copies the Rdata.
Copy(PrivateRdata) error
// Len returns the length in octets of the Rdata.
Len() int
}
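
// The following is a minimal sketch of a PrivateRdata implementation, written
// as it would appear in a separate package that imports this one (imports of
// fmt, strings and the dns package are assumed). The MYTXTRdata name and its
// single text field are illustrative assumptions, not part of this package:
//
//	type MYTXTRdata struct{ txt string }
//
//	func (rd *MYTXTRdata) String() string { return rd.txt }
//
//	func (rd *MYTXTRdata) Parse(s []string) error {
//		rd.txt = strings.Join(s, " ")
//		return nil
//	}
//
//	func (rd *MYTXTRdata) Pack(buf []byte) (int, error) {
//		if len(buf) < len(rd.txt) {
//			return 0, fmt.Errorf("buffer too small")
//		}
//		return copy(buf, rd.txt), nil
//	}
//
//	func (rd *MYTXTRdata) Unpack(buf []byte) (int, error) {
//		rd.txt = string(buf)
//		return len(buf), nil
//	}
//
//	func (rd *MYTXTRdata) Copy(dest dns.PrivateRdata) error {
//		d, ok := dest.(*MYTXTRdata)
//		if !ok {
//			return fmt.Errorf("dest is not a *MYTXTRdata")
//		}
//		d.txt = rd.txt
//		return nil
//	}
//
//	func (rd *MYTXTRdata) Len() int { return len(rd.txt) }
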
// PrivateRR represents an RR that uses a PrivateRdata user-defined type.
// It mocks normal RRs and implements the dns.RR interface.
type PrivateRR struct {
Hdr RR_Header
Data PrivateRdata
}
func mkPrivateRR(rrtype uint16) *PrivateRR {
// Panics if RR is not an instance of PrivateRR.
rrfunc, ok := TypeToRR[rrtype]
if !ok {
panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype))
}
anyrr := rrfunc()
switch rr := anyrr.(type) {
case *PrivateRR:
return rr
}
panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr))
}
// Header returns the RR header of r.
func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
// Private len and copy parts to satisfy RR interface.
func (r *PrivateRR) len(off int, compression map[string]struct{}) int {
l := r.Hdr.len(off, compression)
l += r.Data.Len()
return l
}
func (r *PrivateRR) copy() RR {
// Create a new PrivateRR of the same type and copy the rdata into it.
rr := mkPrivateRR(r.Hdr.Rrtype)
rr.Hdr = r.Hdr
err := r.Data.Copy(rr.Data)
if err != nil {
panic("dns: got value that could not be used to copy Private rdata")
}
return rr
}
func (r *PrivateRR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
off, err := r.Hdr.pack(msg, off, compression, compress)
if err != nil {
return off, err
}
headerEnd := off
n, err := r.Data.Pack(msg[off:])
if err != nil {
return len(msg), err
}
off += n
r.Header().Rdlength = uint16(off - headerEnd)
return off, nil
}
// PrivateHandle registers a private resource record type. It requires the
// string and numeric representation of the private RR type, and a generator
// function, as arguments.
func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
rtypestr = strings.ToUpper(rtypestr)
TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
TypeToString[rtype] = rtypestr
StringToType[rtypestr] = rtype
typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) {
if noRdata(h) {
return &h, off, nil
}
var err error
rr := mkPrivateRR(h.Rrtype)
rr.Hdr = h
off1, err := rr.Data.Unpack(msg[off:])
off += off1
if err != nil {
return rr, off, err
}
return rr, off, err
}
setPrivateRR := func(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) {
rr := mkPrivateRR(h.Rrtype)
rr.Hdr = h
var l lex
text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
Fetch:
for {
// TODO(miek): we could also be returning _QUOTE, this might or might not
// be an issue (basically parsing TXT becomes hard)
switch l, _ = c.Next(); l.value {
case zNewline, zEOF:
break Fetch
case zString:
text = append(text, l.token)
}
}
err := rr.Data.Parse(text)
if err != nil {
return nil, &ParseError{f, err.Error(), l}, ""
}
return rr, nil, ""
}
typeToparserFunc[rtype] = parserFunc{setPrivateRR, true}
}
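
// A usage sketch for the registration above. The MYTXT name, the 65280 type
// code, and the MYTXTRdata generator are illustrative assumptions carried over
// from the sketch under PrivateRdata, not values defined by this package:
//
//	dns.PrivateHandle("MYTXT", 65280, func() dns.PrivateRdata { return new(MYTXTRdata) })
//	defer dns.PrivateHandleRemove(65280)
//
//	rr, err := dns.NewRR("example.org. 3600 IN MYTXT a b c")
//	if err == nil {
//		fmt.Println(rr) // header text followed by Data.String()
//	}
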
// PrivateHandleRemove removes the definitions required to support the private RR type.
func PrivateHandleRemove(rtype uint16) {
rtypestr, ok := TypeToString[rtype]
if ok {
delete(TypeToRR, rtype)
delete(TypeToString, rtype)
delete(typeToparserFunc, rtype)
delete(StringToType, rtypestr)
delete(typeToUnpack, rtype)
}
}