Refactor server shutdown to call Close() on conn and sync on srv.started
Remove the necessity for the hackish (and unreliable) fake packet. Fix a couple races and unclutter the start/stop internal state.
This commit is contained in:
parent
7463496b65
commit
a58e9c7a9e
121
server.go
121
server.go
|
@ -271,27 +271,21 @@ type Server struct {
|
||||||
// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
|
// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
|
||||||
DecorateWriter DecorateWriter
|
DecorateWriter DecorateWriter
|
||||||
|
|
||||||
// For graceful shutdown.
|
// Graceful shutdown handling
|
||||||
stopUDP chan bool
|
|
||||||
stopTCP chan bool
|
|
||||||
wgUDP sync.WaitGroup
|
|
||||||
wgTCP sync.WaitGroup
|
|
||||||
|
|
||||||
// make start/shutdown not racy
|
inFlight sync.WaitGroup
|
||||||
lock sync.Mutex
|
|
||||||
|
lock sync.RWMutex
|
||||||
started bool
|
started bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListenAndServe starts a nameserver on the configured address in *Server.
|
// ListenAndServe starts a nameserver on the configured address in *Server.
|
||||||
func (srv *Server) ListenAndServe() error {
|
func (srv *Server) ListenAndServe() error {
|
||||||
srv.lock.Lock()
|
srv.lock.Lock()
|
||||||
// We can't use defer() because serveTCP/serveUDP don't return.
|
defer srv.lock.Unlock()
|
||||||
if srv.started {
|
if srv.started {
|
||||||
srv.lock.Unlock()
|
|
||||||
return &Error{err: "server already started"}
|
return &Error{err: "server already started"}
|
||||||
}
|
}
|
||||||
srv.stopUDP, srv.stopTCP = make(chan bool), make(chan bool)
|
|
||||||
srv.started = true
|
|
||||||
addr := srv.Addr
|
addr := srv.Addr
|
||||||
if addr == "" {
|
if addr == "" {
|
||||||
addr = ":domain"
|
addr = ":domain"
|
||||||
|
@ -303,43 +297,37 @@ func (srv *Server) ListenAndServe() error {
|
||||||
case "tcp", "tcp4", "tcp6":
|
case "tcp", "tcp4", "tcp6":
|
||||||
a, e := net.ResolveTCPAddr(srv.Net, addr)
|
a, e := net.ResolveTCPAddr(srv.Net, addr)
|
||||||
if e != nil {
|
if e != nil {
|
||||||
srv.lock.Unlock()
|
|
||||||
srv.started = false
|
|
||||||
return e
|
return e
|
||||||
}
|
}
|
||||||
l, e := net.ListenTCP(srv.Net, a)
|
l, e := net.ListenTCP(srv.Net, a)
|
||||||
if e != nil {
|
if e != nil {
|
||||||
srv.lock.Unlock()
|
|
||||||
srv.started = false
|
|
||||||
return e
|
return e
|
||||||
}
|
}
|
||||||
srv.Listener = l
|
srv.Listener = l
|
||||||
|
srv.started = true
|
||||||
srv.lock.Unlock()
|
srv.lock.Unlock()
|
||||||
return srv.serveTCP(l)
|
e = srv.serveTCP(l)
|
||||||
|
srv.lock.Lock() // to satisfy the defer at the top
|
||||||
|
return e
|
||||||
case "udp", "udp4", "udp6":
|
case "udp", "udp4", "udp6":
|
||||||
a, e := net.ResolveUDPAddr(srv.Net, addr)
|
a, e := net.ResolveUDPAddr(srv.Net, addr)
|
||||||
if e != nil {
|
if e != nil {
|
||||||
srv.lock.Unlock()
|
|
||||||
srv.started = false
|
|
||||||
return e
|
return e
|
||||||
}
|
}
|
||||||
l, e := net.ListenUDP(srv.Net, a)
|
l, e := net.ListenUDP(srv.Net, a)
|
||||||
if e != nil {
|
if e != nil {
|
||||||
srv.lock.Unlock()
|
|
||||||
srv.started = false
|
|
||||||
return e
|
return e
|
||||||
}
|
}
|
||||||
if e := setUDPSocketOptions(l); e != nil {
|
if e := setUDPSocketOptions(l); e != nil {
|
||||||
srv.lock.Unlock()
|
|
||||||
srv.started = false
|
|
||||||
return e
|
return e
|
||||||
}
|
}
|
||||||
srv.PacketConn = l
|
srv.PacketConn = l
|
||||||
|
srv.started = true
|
||||||
srv.lock.Unlock()
|
srv.lock.Unlock()
|
||||||
return srv.serveUDP(l)
|
e = srv.serveUDP(l)
|
||||||
|
srv.lock.Lock() // to satisfy the defer at the top
|
||||||
|
return e
|
||||||
}
|
}
|
||||||
srv.lock.Unlock()
|
|
||||||
srv.started = false
|
|
||||||
return &Error{err: "bad network"}
|
return &Error{err: "bad network"}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -347,12 +335,10 @@ func (srv *Server) ListenAndServe() error {
|
||||||
// configured in *Server. Its main use is to start a server from systemd.
|
// configured in *Server. Its main use is to start a server from systemd.
|
||||||
func (srv *Server) ActivateAndServe() error {
|
func (srv *Server) ActivateAndServe() error {
|
||||||
srv.lock.Lock()
|
srv.lock.Lock()
|
||||||
|
defer srv.lock.Unlock()
|
||||||
if srv.started {
|
if srv.started {
|
||||||
srv.lock.Unlock()
|
|
||||||
return &Error{err: "server already started"}
|
return &Error{err: "server already started"}
|
||||||
}
|
}
|
||||||
srv.stopUDP, srv.stopTCP = make(chan bool), make(chan bool)
|
|
||||||
srv.started = true
|
|
||||||
pConn := srv.PacketConn
|
pConn := srv.PacketConn
|
||||||
l := srv.Listener
|
l := srv.Listener
|
||||||
if pConn != nil {
|
if pConn != nil {
|
||||||
|
@ -361,22 +347,24 @@ func (srv *Server) ActivateAndServe() error {
|
||||||
}
|
}
|
||||||
if t, ok := pConn.(*net.UDPConn); ok {
|
if t, ok := pConn.(*net.UDPConn); ok {
|
||||||
if e := setUDPSocketOptions(t); e != nil {
|
if e := setUDPSocketOptions(t); e != nil {
|
||||||
srv.lock.Unlock()
|
|
||||||
srv.started = false
|
|
||||||
return e
|
return e
|
||||||
}
|
}
|
||||||
|
srv.started = true
|
||||||
srv.lock.Unlock()
|
srv.lock.Unlock()
|
||||||
return srv.serveUDP(t)
|
e := srv.serveUDP(t)
|
||||||
|
srv.lock.Lock() // to satisfy the defer at the top
|
||||||
|
return e
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if l != nil {
|
if l != nil {
|
||||||
if t, ok := l.(*net.TCPListener); ok {
|
if t, ok := l.(*net.TCPListener); ok {
|
||||||
|
srv.started = true
|
||||||
srv.lock.Unlock()
|
srv.lock.Unlock()
|
||||||
return srv.serveTCP(t)
|
e := srv.serveTCP(t)
|
||||||
|
srv.lock.Lock() // to satisfy the defer at the top
|
||||||
|
return e
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
srv.lock.Unlock()
|
|
||||||
srv.started = false
|
|
||||||
return &Error{err: "bad listeners"}
|
return &Error{err: "bad listeners"}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -391,36 +379,20 @@ func (srv *Server) Shutdown() error {
|
||||||
return &Error{err: "server not started"}
|
return &Error{err: "server not started"}
|
||||||
}
|
}
|
||||||
srv.started = false
|
srv.started = false
|
||||||
net, addr := srv.Net, srv.Addr
|
|
||||||
switch {
|
|
||||||
case srv.Listener != nil:
|
|
||||||
a := srv.Listener.Addr()
|
|
||||||
net, addr = a.Network(), a.String()
|
|
||||||
case srv.PacketConn != nil:
|
|
||||||
a := srv.PacketConn.LocalAddr()
|
|
||||||
net, addr = a.Network(), a.String()
|
|
||||||
}
|
|
||||||
srv.lock.Unlock()
|
srv.lock.Unlock()
|
||||||
|
|
||||||
fin := make(chan bool)
|
if srv.PacketConn != nil {
|
||||||
switch net {
|
srv.PacketConn.Close()
|
||||||
case "tcp", "tcp4", "tcp6":
|
}
|
||||||
go func() {
|
if srv.Listener != nil {
|
||||||
srv.stopTCP <- true
|
srv.Listener.Close()
|
||||||
srv.wgTCP.Wait()
|
|
||||||
fin <- true
|
|
||||||
}()
|
|
||||||
|
|
||||||
case "udp", "udp4", "udp6":
|
|
||||||
go func() {
|
|
||||||
srv.stopUDP <- true
|
|
||||||
srv.wgUDP.Wait()
|
|
||||||
fin <- true
|
|
||||||
}()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
c := &Client{Net: net}
|
fin := make(chan bool)
|
||||||
go c.Exchange(new(Msg), addr) // extra query to help ReadXXX loop to pass
|
go func() {
|
||||||
|
srv.inFlight.Wait()
|
||||||
|
fin <- true
|
||||||
|
}()
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-time.After(srv.getReadTimeout()):
|
case <-time.After(srv.getReadTimeout()):
|
||||||
|
@ -465,15 +437,16 @@ func (srv *Server) serveTCP(l *net.TCPListener) error {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
m, e := reader.ReadTCP(rw, rtimeout)
|
m, e := reader.ReadTCP(rw, rtimeout)
|
||||||
select {
|
srv.lock.RLock()
|
||||||
case <-srv.stopTCP:
|
if !srv.started {
|
||||||
|
srv.lock.RUnlock()
|
||||||
return nil
|
return nil
|
||||||
default:
|
|
||||||
}
|
}
|
||||||
|
srv.lock.RUnlock()
|
||||||
if e != nil {
|
if e != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
srv.wgTCP.Add(1)
|
srv.inFlight.Add(1)
|
||||||
go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
|
go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -500,21 +473,24 @@ func (srv *Server) serveUDP(l *net.UDPConn) error {
|
||||||
// deadline is not used here
|
// deadline is not used here
|
||||||
for {
|
for {
|
||||||
m, s, e := reader.ReadUDP(l, rtimeout)
|
m, s, e := reader.ReadUDP(l, rtimeout)
|
||||||
select {
|
srv.lock.RLock()
|
||||||
case <-srv.stopUDP:
|
if !srv.started {
|
||||||
|
srv.lock.RUnlock()
|
||||||
return nil
|
return nil
|
||||||
default:
|
|
||||||
}
|
}
|
||||||
|
srv.lock.RUnlock()
|
||||||
if e != nil {
|
if e != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
srv.wgUDP.Add(1)
|
srv.inFlight.Add(1)
|
||||||
go srv.serve(s.RemoteAddr(), handler, m, l, s, nil)
|
go srv.serve(s.RemoteAddr(), handler, m, l, s, nil)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Serve a new connection.
|
// Serve a new connection.
|
||||||
func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t *net.TCPConn) {
|
func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t *net.TCPConn) {
|
||||||
|
defer srv.inFlight.Done()
|
||||||
|
|
||||||
w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s}
|
w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s}
|
||||||
if srv.DecorateWriter != nil {
|
if srv.DecorateWriter != nil {
|
||||||
w.writer = srv.DecorateWriter(w)
|
w.writer = srv.DecorateWriter(w)
|
||||||
|
@ -524,15 +500,6 @@ func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *Ses
|
||||||
|
|
||||||
q := 0 // counter for the amount of TCP queries we get
|
q := 0 // counter for the amount of TCP queries we get
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if u != nil {
|
|
||||||
srv.wgUDP.Done()
|
|
||||||
}
|
|
||||||
if t != nil {
|
|
||||||
srv.wgTCP.Done()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
reader := Reader(&defaultReader{srv})
|
reader := Reader(&defaultReader{srv})
|
||||||
if srv.DecorateReader != nil {
|
if srv.DecorateReader != nil {
|
||||||
reader = srv.DecorateReader(reader)
|
reader = srv.DecorateReader(reader)
|
||||||
|
|
Loading…
Reference in New Issue