dns/zscan.go

756 lines
17 KiB
Go
Raw Normal View History

2011-12-14 08:26:31 +00:00
package dns
2011-12-14 08:00:39 +00:00
import (
"fmt"
2011-12-14 08:26:31 +00:00
"io"
"os"
2011-12-14 08:00:39 +00:00
"strconv"
"strings"
)
2011-12-16 09:30:42 +00:00
// _DEBUG turns on token tracing; only used when debugging the parser itself.
var _DEBUG = false

// maxTok is the maximum length of a single lexer token.
// Complete unsure about the correctness of this value?
// Large blobs of base64 code might get longer than this....
const maxTok = 2048
2011-12-16 09:30:42 +00:00
2011-12-14 08:00:39 +00:00
// Tokenize a RFC 1035 zone file. The tokenizer will normalize it:
// * Add ownernames if they are left blank;
// * Suppress sequences of spaces;
// * Make each RR fit on one line (NEWLINE is sent as last)
// * Handle comments: ;
// * Handle braces.
//
// The values below serve double duty: the first group are token kinds
// emitted by the lexer, the second group are parser states. They are
// iota-numbered, so order matters — do not reorder.
const (
	// Zonefile tokens.
	_EOF = iota
	_STRING
	_BLANK
	_QUOTE
	_NEWLINE
	_RRTYPE
	_OWNER
	_CLASS
	_DIRORIGIN  // $ORIGIN
	_DIRTTL     // $TTL
	_DIRINCLUDE // $INCLUDE

	// Privatekey file tokens.
	_VALUE
	_KEY

	// Parser states.
	_EXPECT_OWNER_DIR      // Ownername
	_EXPECT_OWNER_BL       // Whitespace after the ownername
	_EXPECT_ANY            // Expect rrtype, ttl or class
	_EXPECT_ANY_NOCLASS    // Expect rrtype or ttl
	_EXPECT_ANY_NOCLASS_BL // The whitespace after _EXPECT_ANY_NOCLASS
	_EXPECT_ANY_NOTTL      // Expect rrtype or class
	_EXPECT_ANY_NOTTL_BL   // Whitespace after _EXPECT_ANY_NOTTL
	_EXPECT_RRTYPE         // Expect rrtype
	_EXPECT_RRTYPE_BL      // Whitespace BEFORE rrtype
	_EXPECT_RDATA          // The first element of the rdata
	_EXPECT_DIRTTL_BL      // Space after directive $TTL
	_EXPECT_DIRTTL         // Directive $TTL
	_EXPECT_DIRORIGIN_BL   // Space after directive $ORIGIN
	_EXPECT_DIRORIGIN      // Directive $ORIGIN
	_EXPECT_DIRINCLUDE_BL  // Space after directive $INCLUDE
	_EXPECT_DIRINCLUDE     // Directive $INCLUDE
)
2011-12-16 18:42:23 +00:00
// ParseError contains the parse error and the location in the io.Reader
// where the error occurred.
type ParseError struct {
	file string // name of the file being parsed; may be empty
	err  string // description of the problem
	lex  lex    // the token at which the error was detected
}

// Error implements the error interface. It renders the optional file name,
// the message, the offending token (ASCII-quoted) and its line:column.
func (e *ParseError) Error() string {
	prefix := ""
	if e.file != "" {
		prefix = e.file + ": "
	}
	where := strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column)
	return prefix + "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " + where
}

// lex is a single token as produced by the zone lexer.
type lex struct {
	token  string // Text of the token
	err    bool   // When true, token text has lexer error
	value  uint8  // Value: _STRING, _BLANK, etc.
	line   int    // Line in the file
	column int    // Column in the file
	torc   uint16 // Type or class as parsed in the lexer, we only need to look this up in the grammar
}
2012-02-13 16:52:53 +00:00
// Tokens are returned when a zone file is parsed.
type Token struct {
2012-02-13 16:52:53 +00:00
RR // the scanned resource record when error is not nil
Error *ParseError // when an error occured, this has the error specifics
}
2012-01-22 19:20:30 +00:00
// NewRR reads the RR contained in the string s. Only the first RR is returned.
2012-02-15 11:50:23 +00:00
// The class defaults to IN and TTL defaults to DefaultTtl. The full zone file
// syntax like $TTL, $ORIGIN, etc. is supported.
func NewRR(s string) (RR, error) {
2011-12-15 21:40:07 +00:00
if s[len(s)-1] != '\n' { // We need a closing newline
return ReadRR(strings.NewReader(s+"\n"), "")
}
return ReadRR(strings.NewReader(s), "")
2012-01-22 19:20:30 +00:00
}
// ReadRR reads the RR contained in q. Only the first RR is returned.
// The class defaults to IN and TTL defaults to DefaultTtl.
2012-01-22 19:20:30 +00:00
func ReadRR(q io.Reader, filename string) (RR, error) {
r := <-ParseZone(q, ".", filename)
if r.Error != nil {
return nil, r.Error
}
return r.RR, nil
}
2012-02-13 16:52:53 +00:00
// ParseZone reads a RFC 1035 zone from r. It returns Tokens on the
// returned channel, which consist out the parsed RR or an error.
2012-02-15 11:50:23 +00:00
// If there is an error the RR is nil. The string file is only used
// in error reporting. The string origin is used as the initial origin, as
// if the file would start with: $ORIGIN origin
2012-02-13 16:52:53 +00:00
// The channel t is closed by ParseZone when the end of r is reached.
func ParseZone(r io.Reader, origin, file string) chan Token {
t := make(chan Token)
go parseZone(r, origin, file, t, 0)
return t
2011-12-19 18:20:55 +00:00
}
func parseZone(r io.Reader, origin, f string, t chan Token, include int) {
defer func() {
if include == 0 {
close(t)
}
}()
s := scanInit(r)
2011-12-16 18:34:30 +00:00
c := make(chan lex)
// Start the lexer
go zlexer(s, c)
// 6 possible beginnings of a line, _ is a space
2012-02-19 19:51:04 +00:00
// 0. _RRTYPE -> all omitted until the rrtype
2011-12-14 08:00:39 +00:00
// 1. _OWNER _ _RRTYPE -> class/ttl omitted
// 2. _OWNER _ _STRING _ _RRTYPE -> class omitted
// 3. _OWNER _ _STRING _ _CLASS _ _RRTYPE -> ttl/class
// 4. _OWNER _ _CLASS _ _RRTYPE -> ttl omitted
// 5. _OWNER _ _CLASS _ _STRING _ _RRTYPE -> class/ttl (reversed)
// After detecting these, we know the _RRTYPE so we can jump to functions
// handling the rdata for each of these types.
if origin == "" {
origin = "."
}
2012-02-18 18:24:53 +00:00
if !IsFqdn(origin) {
t <- Token{Error: &ParseError{f, "bad initial origin name", lex{}}}
return
}
if _, _, ok := IsDomainName(origin); !ok {
2012-02-15 11:50:23 +00:00
t <- Token{Error: &ParseError{f, "bad initial origin name", lex{}}}
return
}
2012-02-18 18:24:53 +00:00
st := _EXPECT_OWNER_DIR // initial state
2011-12-14 08:26:31 +00:00
var h RR_Header
2011-12-18 16:58:06 +00:00
var defttl uint32 = DefaultTtl
var prevName string
2011-12-14 08:00:39 +00:00
for l := range c {
2011-12-16 09:30:42 +00:00
if _DEBUG {
2011-12-14 14:37:36 +00:00
fmt.Printf("[%v]\n", l)
}
// Lexer spotted an error already
2012-02-23 19:13:37 +00:00
if l.err == true {
t <- Token{Error: &ParseError{f, l.token, l}}
return
}
2011-12-14 08:00:39 +00:00
switch st {
2011-12-18 16:58:06 +00:00
case _EXPECT_OWNER_DIR:
// We can also expect a directive, like $TTL or $ORIGIN
h.Ttl = defttl
h.Class = ClassINET
2011-12-14 08:00:39 +00:00
switch l.value {
2011-12-14 08:26:31 +00:00
case _NEWLINE: // Empty line
2011-12-18 16:58:06 +00:00
st = _EXPECT_OWNER_DIR
2011-12-14 08:00:39 +00:00
case _OWNER:
h.Name = l.token
2012-02-19 19:51:04 +00:00
if l.token[0] == '@' {
h.Name = origin
prevName = h.Name
st = _EXPECT_OWNER_BL
break
}
_, ld, ok := IsDomainName(l.token)
if !ok {
t <- Token{Error: &ParseError{f, "bad owner name", l}}
2012-01-12 21:49:26 +00:00
return
}
if h.Name[ld-1] != '.' {
h.Name = appendOrigin(h.Name, origin)
2012-01-12 21:49:26 +00:00
}
prevName = h.Name
2011-12-14 08:00:39 +00:00
st = _EXPECT_OWNER_BL
2011-12-18 16:58:06 +00:00
case _DIRTTL:
st = _EXPECT_DIRTTL_BL
case _DIRORIGIN:
st = _EXPECT_DIRORIGIN_BL
case _DIRINCLUDE:
st = _EXPECT_DIRINCLUDE_BL
case _RRTYPE: // Everthing has been omitted, this is the first thing on the line
h.Name = prevName
2012-02-27 20:12:04 +00:00
h.Rrtype = l.torc
st = _EXPECT_RDATA
2012-02-27 20:12:04 +00:00
case _CLASS: // First thing on the line is the class
h.Name = prevName
2012-02-27 20:12:04 +00:00
h.Class = l.torc
st = _EXPECT_ANY_NOCLASS_BL
case _BLANK:
// Discard, can happen when there is nothing on the
// line except the RR type
2012-02-27 20:12:04 +00:00
case _STRING: // First thing on the is the ttl
if ttl, ok := stringToTtl(l, f); !ok {
t <- Token{Error: &ParseError{f, "not a TTL", l}}
return
} else {
h.Ttl = ttl
defttl = ttl
}
st = _EXPECT_ANY_NOTTL_BL
2012-02-27 20:12:04 +00:00
2011-12-14 08:00:39 +00:00
default:
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "syntax error at beginning", l}}
return
}
case _EXPECT_DIRINCLUDE_BL:
if l.value != _BLANK {
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}}
return
2011-12-14 08:00:39 +00:00
}
st = _EXPECT_DIRINCLUDE
case _EXPECT_DIRINCLUDE:
if l.value != _STRING {
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}}
return
}
if e := slurpRemainder(c, f); e != nil {
t <- Token{Error: e}
}
// Start with the new file
2012-01-21 22:42:33 +00:00
r1, e1 := os.Open(l.token)
if e1 != nil {
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "failed to open `" + l.token + "'", l}}
2012-01-21 22:42:33 +00:00
return
}
if include+1 > 7 {
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}}
return
}
parseZone(r1, l.token, origin, t, include+1)
2012-01-21 22:42:33 +00:00
st = _EXPECT_OWNER_DIR
2011-12-18 16:58:06 +00:00
case _EXPECT_DIRTTL_BL:
if l.value != _BLANK {
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "no blank after $TTL-directive", l}}
2011-12-18 16:58:06 +00:00
return
}
st = _EXPECT_DIRTTL
case _EXPECT_DIRTTL:
if l.value != _STRING {
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
2011-12-18 16:58:06 +00:00
return
}
if e := slurpRemainder(c, f); e != nil {
t <- Token{Error: e}
return
}
2012-02-15 08:04:09 +00:00
if ttl, ok := stringToTtl(l, f); !ok {
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
2011-12-18 16:58:06 +00:00
return
} else {
defttl = ttl
}
st = _EXPECT_OWNER_DIR
2012-01-12 21:49:26 +00:00
case _EXPECT_DIRORIGIN_BL:
if l.value != _BLANK {
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}}
2012-01-12 21:49:26 +00:00
return
}
st = _EXPECT_DIRORIGIN
case _EXPECT_DIRORIGIN:
if l.value != _STRING {
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}}
2012-01-12 21:49:26 +00:00
return
}
if e := slurpRemainder(c, f); e != nil {
t <- Token{Error: e}
}
2012-01-12 21:49:26 +00:00
if !IsFqdn(l.token) {
if origin != "." { // Prevent .. endings
origin = l.token + "." + origin
} else {
origin = l.token + origin
}
2012-01-12 21:49:26 +00:00
} else {
origin = l.token
2012-01-12 21:49:26 +00:00
}
2012-02-12 21:24:18 +00:00
st = _EXPECT_OWNER_DIR
2011-12-14 08:26:31 +00:00
case _EXPECT_OWNER_BL:
2011-12-14 08:00:39 +00:00
if l.value != _BLANK {
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "no blank after owner", l}}
return
2011-12-14 08:00:39 +00:00
}
st = _EXPECT_ANY
2011-12-14 08:26:31 +00:00
case _EXPECT_ANY:
2011-12-14 08:00:39 +00:00
switch l.value {
case _RRTYPE:
2012-02-27 20:12:04 +00:00
h.Rrtype = l.torc
2011-12-14 14:37:36 +00:00
st = _EXPECT_RDATA
2011-12-14 08:00:39 +00:00
case _CLASS:
2012-02-27 20:12:04 +00:00
h.Class = l.torc
2011-12-14 13:35:45 +00:00
st = _EXPECT_ANY_NOCLASS_BL
2011-12-14 08:26:31 +00:00
case _STRING: // TTL is this case
2012-02-15 08:04:09 +00:00
if ttl, ok := stringToTtl(l, f); !ok {
2012-02-18 18:24:53 +00:00
t <- Token{Error: &ParseError{f, "not a TTL", l}}
return
2011-12-14 08:00:39 +00:00
} else {
2011-12-18 16:58:06 +00:00
h.Ttl = ttl
defttl = ttl
2011-12-14 08:00:39 +00:00
}
st = _EXPECT_ANY_NOTTL_BL
default:
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}}
return
2011-12-14 08:00:39 +00:00
}
2011-12-14 13:35:45 +00:00
case _EXPECT_ANY_NOCLASS_BL:
2011-12-14 08:00:39 +00:00
if l.value != _BLANK {
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "no blank before class", l}}
return
2011-12-14 08:00:39 +00:00
}
2011-12-14 13:35:45 +00:00
st = _EXPECT_ANY_NOCLASS
2011-12-14 08:00:39 +00:00
case _EXPECT_ANY_NOTTL_BL:
if l.value != _BLANK {
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "no blank before TTL", l}}
return
2011-12-14 08:00:39 +00:00
}
st = _EXPECT_ANY_NOTTL
case _EXPECT_ANY_NOTTL:
switch l.value {
case _CLASS:
2012-02-27 20:12:04 +00:00
h.Class = l.torc
2011-12-14 08:00:39 +00:00
st = _EXPECT_RRTYPE_BL
case _RRTYPE:
2012-02-27 20:12:04 +00:00
h.Rrtype = l.torc
2011-12-14 14:37:36 +00:00
st = _EXPECT_RDATA
2012-03-04 09:41:16 +00:00
default:
t <- Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}}
2012-03-03 16:40:30 +00:00
return
2011-12-14 08:00:39 +00:00
}
2011-12-14 13:35:45 +00:00
case _EXPECT_ANY_NOCLASS:
2011-12-14 08:00:39 +00:00
switch l.value {
case _STRING: // TTL
2012-02-15 08:04:09 +00:00
if ttl, ok := stringToTtl(l, f); !ok {
2012-02-18 18:24:53 +00:00
t <- Token{Error: &ParseError{f, "not a TTL", l}}
return
2011-12-14 08:00:39 +00:00
} else {
2011-12-18 16:58:06 +00:00
h.Ttl = ttl
defttl = ttl
2011-12-14 08:00:39 +00:00
}
2011-12-15 10:22:54 +00:00
st = _EXPECT_RRTYPE_BL
2011-12-14 08:00:39 +00:00
case _RRTYPE:
2012-02-27 20:12:04 +00:00
h.Rrtype = l.torc
2011-12-14 14:37:36 +00:00
st = _EXPECT_RDATA
2011-12-14 08:00:39 +00:00
default:
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}}
return
2011-12-14 08:00:39 +00:00
}
case _EXPECT_RRTYPE_BL:
if l.value != _BLANK {
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "no blank before RR type", l}}
return
2011-12-14 08:00:39 +00:00
}
st = _EXPECT_RRTYPE
case _EXPECT_RRTYPE:
if l.value != _RRTYPE {
2012-02-15 22:04:46 +00:00
t <- Token{Error: &ParseError{f, "unknown RR type", l}}
return
2011-12-14 08:00:39 +00:00
}
2012-02-27 20:12:04 +00:00
h.Rrtype = l.torc
2011-12-14 08:00:39 +00:00
st = _EXPECT_RDATA
case _EXPECT_RDATA:
// I could save my token here...? l
r, e := setRR(h, c, origin, f)
if e != nil {
2011-12-16 18:34:30 +00:00
// If e.lex is nil than we have encounter a unknown RR type
// in that case we substitute our current lex token
2011-12-16 13:48:30 +00:00
if e.lex.token == "" && e.lex.value == 0 {
e.lex = l // Uh, dirty
}
2011-12-16 09:26:32 +00:00
t <- Token{Error: e}
return
2011-12-14 08:00:39 +00:00
}
2012-01-12 10:17:01 +00:00
t <- Token{RR: r}
2011-12-18 16:58:06 +00:00
st = _EXPECT_OWNER_DIR
2011-12-14 08:00:39 +00:00
}
}
2012-03-03 16:40:30 +00:00
// If we get here, we and the h.Rrtype is still zero, we haven't parsed anything
if h.Rrtype == 0 {
t <- Token{Error: &ParseError{f, "nothing made sense", lex{}}}
}
2011-12-14 08:00:39 +00:00
}
func (l lex) _string() string {
2011-12-14 08:00:39 +00:00
switch l.value {
case _STRING:
return "S:" + l.token + "$"
2011-12-14 08:00:39 +00:00
case _BLANK:
return "_"
2012-02-12 21:24:18 +00:00
case _QUOTE:
return "\""
2011-12-14 08:00:39 +00:00
case _NEWLINE:
return "|"
2011-12-14 08:00:39 +00:00
case _RRTYPE:
return "R:" + l.token + "$"
2011-12-14 08:00:39 +00:00
case _OWNER:
return "O:" + l.token + "$"
2011-12-14 08:00:39 +00:00
case _CLASS:
return "C:" + l.token + "$"
2011-12-18 16:58:06 +00:00
case _DIRTTL:
return "$T:" + l.token + "$"
case _DIRORIGIN:
return "$O:" + l.token + "$"
case _DIRINCLUDE:
return "$I:" + l.token + "$"
2011-12-14 08:00:39 +00:00
}
return "**"
2011-12-14 08:00:39 +00:00
}
// zlexer scans the sourcefile and returns tokens on the channel c.
func zlexer(s *scan, c chan lex) {
2011-12-16 18:34:30 +00:00
var l lex
str := make([]byte, maxTok) // Should be enough for any token
2012-01-27 23:35:37 +00:00
stri := 0 // Offset in str (0 means empty)
2011-12-14 08:00:39 +00:00
quote := false
2011-12-18 18:59:01 +00:00
escape := false
2011-12-14 08:00:39 +00:00
space := false
commt := false
rrtype := false
owner := true
brace := 0
x, err := s.tokenText()
defer close(c)
for err == nil {
l.column = s.position.Column
l.line = s.position.Line
2012-01-27 23:35:37 +00:00
if stri > maxTok {
2012-02-23 19:13:37 +00:00
l.token = "tok length insufficient for parsing"
l.err = true
2012-01-27 23:35:37 +00:00
c <- l
return
}
switch x {
case ' ', '\t':
if quote {
// Inside quotes this is legal
str[stri] = x
stri++
break
}
escape = false
2011-12-14 08:00:39 +00:00
if commt {
break
}
if stri == 0 {
// Space directly as the beginnin, handled in the grammar
2011-12-14 08:00:39 +00:00
} else if owner {
2011-12-14 08:26:31 +00:00
// If we have a string and its the first, make it an owner
2011-12-14 08:00:39 +00:00
l.value = _OWNER
l.token = string(str[:stri])
// escape $... start with a \ not a $, so this will work
2012-02-19 18:57:01 +00:00
switch l.token {
2012-01-21 22:42:33 +00:00
case "$TTL":
2011-12-18 18:59:01 +00:00
l.value = _DIRTTL
2012-01-21 22:42:33 +00:00
case "$ORIGIN":
2011-12-18 18:59:01 +00:00
l.value = _DIRORIGIN
2012-01-21 22:42:33 +00:00
case "$INCLUDE":
l.value = _DIRINCLUDE
2012-01-21 22:42:33 +00:00
}
2011-12-14 08:00:39 +00:00
c <- l
} else {
l.value = _STRING
l.token = string(str[:stri])
2011-12-14 08:00:39 +00:00
2011-12-14 08:26:31 +00:00
if !rrtype {
2012-02-27 20:12:04 +00:00
if t, ok := Str_rr[strings.ToUpper(l.token)]; ok {
2011-12-14 08:26:31 +00:00
l.value = _RRTYPE
2012-02-27 20:12:04 +00:00
l.torc = t
rrtype = true
} else {
2012-02-27 20:12:04 +00:00
if strings.HasPrefix("TYPE", l.token) {
if t, ok := typeToInt(l.token); !ok {
l.token = "unknown RR type"
l.err = true
c <- l
return
} else {
l.value = _RRTYPE
l.torc = t
}
}
}
2012-02-27 20:12:04 +00:00
if t, ok := Str_class[strings.ToUpper(l.token)]; ok {
2011-12-14 08:26:31 +00:00
l.value = _CLASS
2012-02-27 20:12:04 +00:00
l.torc = t
} else {
2012-02-27 20:12:04 +00:00
if strings.HasPrefix("CLASS", l.token) {
if t, ok := classToInt(l.token); !ok {
l.token = "unknown class"
l.err = true
c <- l
return
} else {
l.value = _CLASS
l.torc = t
}
}
2011-12-14 08:26:31 +00:00
}
2011-12-14 08:00:39 +00:00
}
c <- l
}
stri = 0
// I reverse space stuff here
2011-12-14 08:00:39 +00:00
if !space && !commt {
l.value = _BLANK
l.token = " "
c <- l
}
owner = false
space = true
case ';':
if quote {
// Inside quotes this is legal
str[stri] = x
stri++
break
}
if escape {
escape = false
str[stri] = x
stri++
2011-12-14 08:00:39 +00:00
break
}
2012-02-12 21:24:18 +00:00
if stri > 0 {
l.value = _STRING
l.token = string(str[:stri])
2012-02-12 21:24:18 +00:00
c <- l
stri = 0
2012-02-12 21:24:18 +00:00
}
2011-12-14 08:00:39 +00:00
commt = true
case '\r':
// discard
// this means it can also not be used as rdata
case '\n':
// Escaped newline
2012-02-12 21:24:18 +00:00
if quote {
str[stri] = x
2012-02-12 21:24:18 +00:00
stri++
break
}
// inside quotes this is legal
escape = false
2011-12-14 08:00:39 +00:00
if commt {
2011-12-14 08:26:31 +00:00
// Reset a comment
2011-12-14 08:00:39 +00:00
commt = false
rrtype = false
stri = 0
// If not in a brace this ends the comment AND the RR
if brace == 0 {
owner = true
owner = true
2011-12-18 16:58:06 +00:00
l.value = _NEWLINE
l.token = "\n"
c <- l
}
2011-12-14 08:00:39 +00:00
break
}
if brace == 0 {
2012-02-19 19:51:04 +00:00
// If there is previous text, we should output it here
if stri != 0 {
l.value = _STRING
l.token = string(str[:stri])
if !rrtype {
if _, ok := Str_rr[strings.ToUpper(l.token)]; ok {
l.value = _RRTYPE
rrtype = true
}
}
c <- l
}
2011-12-14 08:00:39 +00:00
l.value = _NEWLINE
l.token = "\n"
c <- l
2012-02-19 19:51:04 +00:00
stri = 0
commt = false
rrtype = false
owner = true
}
case '\\':
2012-02-12 21:24:18 +00:00
// quote?
2011-12-18 18:59:01 +00:00
if commt {
break
}
if escape {
str[stri] = x
stri++
escape = false
break
}
str[stri] = x
stri++
2011-12-18 18:59:01 +00:00
escape = true
case '"':
2011-12-14 08:00:39 +00:00
if commt {
break
}
if escape {
str[stri] = x
stri++
escape = false
break
}
space = false
// send previous gathered text and the quote
2012-02-12 21:24:18 +00:00
if stri != 0 {
l.value = _STRING
l.token = string(str[:stri])
c <- l
stri = 0
}
l.value = _QUOTE
l.token = "\""
c <- l
2011-12-14 08:00:39 +00:00
quote = !quote
2012-02-14 12:57:12 +00:00
case '(', ')':
2012-02-12 21:24:18 +00:00
if quote {
str[stri] = x
2012-02-12 21:24:18 +00:00
stri++
break
}
2011-12-14 08:00:39 +00:00
if commt {
break
}
if escape {
str[stri] = x
stri++
escape = false
break
}
switch x {
case ')':
brace--
if brace < 0 {
2012-02-23 19:13:37 +00:00
l.token = "extra closing brace"
l.err = true
c <- l
return
}
case '(':
brace++
}
2011-12-14 08:00:39 +00:00
default:
if commt {
break
}
escape = false
str[stri] = x
stri++
2011-12-14 08:00:39 +00:00
space = false
}
x, err = s.tokenText()
2011-12-14 08:00:39 +00:00
}
// Hmm.
if stri > 0 {
// Send remainder
l.token = string(str[:stri])
l.value = _STRING
c <- l
}
2011-12-14 08:00:39 +00:00
}
2011-12-18 16:58:06 +00:00
// classToInt extracts the class number from a CLASSxx token (RFC 3597
// style). The bool is false when the text after "CLASS" is not a decimal
// number that fits in 16 bits. Callers only pass tokens with the "CLASS"
// prefix, so token[5:] is in range.
func classToInt(token string) (uint16, bool) {
	// BUGFIX: strconv.Atoi accepted negative values and values > 65535,
	// which were then silently truncated by the uint16 conversion.
	// ParseUint with bitSize 16 rejects them outright.
	class, err := strconv.ParseUint(token[5:], 10, 16)
	if err != nil {
		return 0, false
	}
	return uint16(class), true
}
// typeToInt extracts the rr number from a TYPExxx token (RFC 3597 style).
// The bool is false when the text after "TYPE" is not a decimal number
// that fits in 16 bits. Callers only pass tokens with the "TYPE" prefix,
// so token[4:] is in range.
func typeToInt(token string) (uint16, bool) {
	// BUGFIX: strconv.Atoi accepted negative values and values > 65535,
	// which were then silently truncated by the uint16 conversion.
	// ParseUint with bitSize 16 rejects them outright.
	typ, err := strconv.ParseUint(token[4:], 10, 16)
	if err != nil {
		return 0, false
	}
	return uint16(typ), true
}
2012-02-15 22:08:21 +00:00
// Parse things like 2w, 2m, etc, Return the time in seconds.
2012-02-15 08:04:09 +00:00
func stringToTtl(l lex, f string) (uint32, bool) {
s := uint32(0)
i := uint32(0)
for _, c := range l.token {
switch c {
case 's', 'S':
s += i
i = 0
case 'm', 'M':
s += i * 60
i = 0
case 'h', 'H':
s += i * 60 * 60
i = 0
case 'd', 'D':
s += i * 60 * 60 * 24
i = 0
case 'w', 'W':
s += i * 60 * 60 * 24 * 7
i = 0
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
i *= 10
i += uint32(c) - '0'
default:
return 0, false
}
}
return s + i, true
}
// appendOrigin glues origin onto name, inserting a dot separator except
// when the origin is the root zone (which would create a ".." ending).
func appendOrigin(name, origin string) string {
	sep := "."
	if origin == "." {
		sep = ""
	}
	return name + sep + origin
}
2012-02-18 18:24:53 +00:00
// slurpRemainder consumes tokens up to the end of the current logical line.
// A single trailing _BLANK is allowed; anything other than a _NEWLINE or
// _EOF after that is reported as garbage.
func slurpRemainder(c chan lex, f string) *ParseError {
	l := <-c
	if l.value == _NEWLINE || l.value == _EOF {
		return nil
	}
	if l.value != _BLANK {
		return &ParseError{f, "garbage after rdata", l}
	}
	// Skip the blank and require the line to end right after it.
	l = <-c
	if l.value != _NEWLINE && l.value != _EOF {
		return &ParseError{f, "garbage after rdata", l}
	}
	return nil
}