// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// HTTP server.  See RFC 2616.

package http

import (
	"bufio"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net"
	"net/url"
	"os"
	"path"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"
)

// Errors introduced by the HTTP server.
var (
	ErrWriteAfterFlush = errors.New("Conn.Write called after Flush")
	ErrBodyNotAllowed  = errors.New("http: request method or response status code does not allow body")
	ErrHijacked        = errors.New("Conn has been hijacked")
	ErrContentLength   = errors.New("Conn.Write wrote more than the declared Content-Length")
)

// Objects implementing the Handler interface can be
// registered to serve a particular path or subtree
// in the HTTP server.
//
// ServeHTTP should write reply headers and data to the ResponseWriter
// and then return.  Returning signals that the request is finished
// and that the HTTP server can move on to the next request on
// the connection.
type Handler interface {
	ServeHTTP(ResponseWriter, *Request)
}
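
// As an illustration, a minimal sketch of a Handler implementation;
// countHandler and its fields are hypothetical names:
//
//	type countHandler struct {
//		mu sync.Mutex // guards n
//		n  int
//	}
//
//	func (h *countHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
//		h.mu.Lock()
//		defer h.mu.Unlock()
//		h.n++
//		fmt.Fprintf(w, "count is %d\n", h.n)
//	}
//
// Such a handler might be registered with, for example,
// http.Handle("/count", new(countHandler)).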

// A ResponseWriter interface is used by an HTTP handler to
// construct an HTTP response.
type ResponseWriter interface {
	// Header returns the header map that will be sent by WriteHeader.
	// Changing the header after a call to WriteHeader (or Write) has
	// no effect.
	Header() Header

	// Write writes the data to the connection as part of an HTTP reply.
	// If WriteHeader has not yet been called, Write calls WriteHeader(http.StatusOK)
	// before writing the data.  If the Header does not contain a
	// Content-Type line, Write adds a Content-Type set to the result of passing
	// the initial 512 bytes of written data to DetectContentType.
	Write([]byte) (int, error)

	// WriteHeader sends an HTTP response header with status code.
	// If WriteHeader is not called explicitly, the first call to Write
	// will trigger an implicit WriteHeader(http.StatusOK).
	// Thus explicit calls to WriteHeader are mainly used to
	// send error codes.
	WriteHeader(int)
}
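
// As an illustration, a minimal sketch of the call order described above:
// headers are set first, WriteHeader (or the first Write) sends them, and
// later header changes have no effect. jsonHandler is a hypothetical name:
//
//	func jsonHandler(w http.ResponseWriter, r *http.Request) {
//		w.Header().Set("Content-Type", "application/json")
//		w.WriteHeader(http.StatusCreated)       // headers go out here
//		w.Write([]byte(`{"ok": true}`))         // body follows
//		w.Header().Set("X-Too-Late", "ignored") // no effect now
//	}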

// The Flusher interface is implemented by ResponseWriters that allow
// an HTTP handler to flush buffered data to the client.
//
// Note that even for ResponseWriters that support Flush,
// if the client is connected through an HTTP proxy,
// the buffered data may not reach the client until the response
// completes.
type Flusher interface {
	// Flush sends any buffered data to the client.
	Flush()
}
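
// As an illustration, a minimal sketch of a streaming handler that uses
// Flusher to push each line to the client as it is produced; progress is
// a hypothetical name:
//
//	func progress(w http.ResponseWriter, r *http.Request) {
//		f, ok := w.(http.Flusher)
//		for i := 0; i < 5; i++ {
//			fmt.Fprintf(w, "step %d\n", i)
//			if ok {
//				f.Flush() // send buffered data now rather than at the end
//			}
//			time.Sleep(time.Second)
//		}
//	}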

// The Hijacker interface is implemented by ResponseWriters that allow
// an HTTP handler to take over the connection.
type Hijacker interface {
	// Hijack lets the caller take over the connection.
	// After a call to Hijack(), the HTTP server library
	// will not do anything else with the connection.
	// It becomes the caller's responsibility to manage
	// and close the connection.
	Hijack() (net.Conn, *bufio.ReadWriter, error)
}
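
// As an illustration, a minimal sketch of a handler that hijacks the
// connection to speak a raw protocol; rawEcho is a hypothetical name:
//
//	func rawEcho(w http.ResponseWriter, r *http.Request) {
//		hj, ok := w.(http.Hijacker)
//		if !ok {
//			http.Error(w, "hijacking not supported", http.StatusInternalServerError)
//			return
//		}
//		conn, bufrw, err := hj.Hijack()
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusInternalServerError)
//			return
//		}
//		defer conn.Close() // the server no longer manages this connection
//		bufrw.WriteString("hello from a hijacked connection\r\n")
//		bufrw.Flush()
//	}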

// The CloseNotifier interface is implemented by ResponseWriters which
// allow detecting when the underlying connection has gone away.
//
// This mechanism can be used to cancel long operations on the server
// if the client has disconnected before the response is ready.
type CloseNotifier interface {
	// CloseNotify returns a channel that receives a single value
	// when the client connection has gone away.
	CloseNotify() <-chan bool
}
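
// As an illustration, a minimal sketch of a handler that abandons a slow
// operation when the client goes away; slowOp is a hypothetical name:
//
//	func slowOp(w http.ResponseWriter, r *http.Request) {
//		done := make(chan struct{})
//		go func() {
//			time.Sleep(10 * time.Second) // stand-in for real work
//			close(done)
//		}()
//		select {
//		case <-w.(http.CloseNotifier).CloseNotify():
//			log.Print("client disconnected; abandoning work")
//		case <-done:
//			io.WriteString(w, "done\n")
//		}
//	}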

// A conn represents the server side of an HTTP connection.
type conn struct {
	remoteAddr string               // network address of remote side
	server     *Server              // the Server on which the connection arrived
	rwc        net.Conn             // i/o connection
	sr         liveSwitchReader     // where the LimitReader reads from; usually the rwc
	lr         *io.LimitedReader    // io.LimitReader(sr)
	buf        *bufio.ReadWriter    // buffered(lr,rwc), reading from bufio->limitReader->sr->rwc
	tlsState   *tls.ConnectionState // or nil when not using TLS

	mu           sync.Mutex // guards the following
	clientGone   bool       // if client has disconnected mid-request
	closeNotifyc chan bool  // made lazily
	hijackedv    bool       // connection has been hijacked by handler
}

func (c *conn) hijacked() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.hijackedv
}

func (c *conn) hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.hijackedv {
		return nil, nil, ErrHijacked
	}
	if c.closeNotifyc != nil {
		return nil, nil, errors.New("http: Hijack is incompatible with use of CloseNotifier")
	}
	c.hijackedv = true
	rwc = c.rwc
	buf = c.buf
	c.rwc = nil
	c.buf = nil
	return
}

func (c *conn) closeNotify() <-chan bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closeNotifyc == nil {
		c.closeNotifyc = make(chan bool, 1)
		if c.hijackedv {
			// to obey the function signature, even though
			// it'll never receive a value.
			return c.closeNotifyc
		}
		pr, pw := io.Pipe()

		readSource := c.sr.r
		c.sr.Lock()
		c.sr.r = pr
		c.sr.Unlock()
		go func() {
			_, err := io.Copy(pw, readSource)
			if err == nil {
				err = io.EOF
			}
			pw.CloseWithError(err)
			c.noteClientGone()
		}()
	}
	return c.closeNotifyc
}

func (c *conn) noteClientGone() {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closeNotifyc != nil && !c.clientGone {
		c.closeNotifyc <- true
	}
	c.clientGone = true
}

// A switchReader can have its Reader changed at runtime.
// It's not safe for concurrent Reads and switches.
type switchReader struct {
	io.Reader
}

// A switchWriter can have its Writer changed at runtime.
// It's not safe for concurrent Writes and switches.
type switchWriter struct {
	io.Writer
}

// A liveSwitchReader is a switchReader that's safe for concurrent
// reads and switches, if its mutex is held.
type liveSwitchReader struct {
	sync.Mutex
	r io.Reader
}

func (sr *liveSwitchReader) Read(p []byte) (n int, err error) {
	sr.Lock()
	r := sr.r
	sr.Unlock()
	return r.Read(p)
}

// This should be >= 512 bytes for DetectContentType,
// but otherwise it's somewhat arbitrary.
const bufferBeforeChunkingSize = 2048

// chunkWriter writes to a response's conn buffer, and is the writer
// wrapped by the response.bufw buffered writer.
//
// chunkWriter also is responsible for finalizing the Header, including
// conditionally setting the Content-Type and setting a Content-Length
// in cases where the handler's final output is smaller than the buffer
// size. It also conditionally adds chunk headers, when in chunking mode.
//
// See the comment above (*response).Write for the entire write flow.
type chunkWriter struct {
	res *response

	// header is either nil or a deep clone of res.handlerHeader
	// at the time of res.WriteHeader, if res.WriteHeader is
	// called and extra buffering is being done to calculate
	// Content-Type and/or Content-Length.
	header Header

	// wroteHeader tells whether the header's been written to "the
	// wire" (or rather: w.conn.buf). this is unlike
	// (*response).wroteHeader, which tells only whether it was
	// logically written.
	wroteHeader bool

	// set by the writeHeader method:
	chunking bool // using chunked transfer encoding for reply body
}

var (
	crlf       = []byte("\r\n")
	colonSpace = []byte(": ")
)

func (cw *chunkWriter) Write(p []byte) (n int, err error) {
	if !cw.wroteHeader {
		cw.writeHeader(p)
	}
	if cw.res.req.Method == "HEAD" {
		// Eat writes.
		return len(p), nil
	}
	if cw.chunking {
		_, err = fmt.Fprintf(cw.res.conn.buf, "%x\r\n", len(p))
		if err != nil {
			cw.res.conn.rwc.Close()
			return
		}
	}
	n, err = cw.res.conn.buf.Write(p)
	if cw.chunking && err == nil {
		_, err = cw.res.conn.buf.Write(crlf)
	}
	if err != nil {
		cw.res.conn.rwc.Close()
	}
	return
}

func (cw *chunkWriter) flush() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	cw.res.conn.buf.Flush()
}

func (cw *chunkWriter) close() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	if cw.chunking {
		// zero EOF chunk, trailer key/value pairs (currently
		// unsupported in Go's server), followed by a blank
		// line.
		cw.res.conn.buf.WriteString("0\r\n\r\n")
	}
}

// A response represents the server side of an HTTP response.
type response struct {
	conn          *conn
	req           *Request // request for this response
	wroteHeader   bool     // reply header has been (logically) written
	wroteContinue bool     // 100 Continue response was written

	w  *bufio.Writer // buffers output in chunks to chunkWriter
	cw chunkWriter
	sw *switchWriter // of the bufio.Writer, for return to putBufioWriter

	// handlerHeader is the Header that Handlers get access to,
	// which may be retained and mutated even after WriteHeader.
	// handlerHeader is copied into cw.header at WriteHeader
	// time, and privately mutated thereafter.
	handlerHeader Header
	calledHeader  bool // handler accessed handlerHeader via Header

	written       int64 // number of bytes written in body
	contentLength int64 // explicitly-declared Content-Length; or -1
	status        int   // status code passed to WriteHeader

	// close connection after this reply.  set on request and
	// updated after response from handler if there's a
	// "Connection: keep-alive" response header and a
	// Content-Length.
	closeAfterReply bool

	// requestBodyLimitHit is set by requestTooLarge when
	// maxBytesReader hits its max size. It is checked in
	// WriteHeader, to make sure we don't consume the
	// remaining request body to try to advance to the next HTTP
	// request. Instead, when this is set, we stop reading
	// subsequent requests on this connection and stop reading
	// input from it.
	requestBodyLimitHit bool

	handlerDone bool // set true when the handler exits

	// Buffers for Date and Content-Length
	dateBuf [len(TimeFormat)]byte
	clenBuf [10]byte
}

// requestTooLarge is called by maxBytesReader when too much input has
// been read from the client.
func (w *response) requestTooLarge() {
	w.closeAfterReply = true
	w.requestBodyLimitHit = true
	if !w.wroteHeader {
		w.Header().Set("Connection", "close")
	}
}

// needsSniff reports whether a Content-Type still needs to be sniffed.
func (w *response) needsSniff() bool {
	_, haveType := w.handlerHeader["Content-Type"]
	return !w.cw.wroteHeader && !haveType && w.written < sniffLen
}

// writerOnly hides an io.Writer value's optional ReadFrom method
// from io.Copy.
type writerOnly struct {
	io.Writer
}

func srcIsRegularFile(src io.Reader) (isRegular bool, err error) {
	switch v := src.(type) {
	case *os.File:
		fi, err := v.Stat()
		if err != nil {
			return false, err
		}
		return fi.Mode().IsRegular(), nil
	case *io.LimitedReader:
		return srcIsRegularFile(v.R)
	default:
		return
	}
}

// ReadFrom is here to optimize copying from an *os.File regular file
// to a *net.TCPConn with sendfile.
func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
	// Our underlying w.conn.rwc is usually a *TCPConn (with its
	// own ReadFrom method). If not, or if our src isn't a regular
	// file, just fall back to the normal copy method.
	rf, ok := w.conn.rwc.(io.ReaderFrom)
	regFile, err := srcIsRegularFile(src)
	if err != nil {
		return 0, err
	}
	if !ok || !regFile {
		return io.Copy(writerOnly{w}, src)
	}

	// sendfile path:

	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}

	if w.needsSniff() {
		n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen))
		n += n0
		if err != nil {
			return n, err
		}
	}

	w.w.Flush()  // get rid of any previous writes
	w.cw.flush() // make sure Header is written; flush data to rwc

	// Now that cw has been flushed, its chunking field is guaranteed initialized.
	if !w.cw.chunking && w.bodyAllowed() {
		n0, err := rf.ReadFrom(src)
		n += n0
		w.written += n0
		return n, err
	}

	n0, err := io.Copy(writerOnly{w}, src)
	n += n0
	return n, err
}

// noLimit is an effective infinite upper bound for io.LimitedReader
const noLimit int64 = (1 << 63) - 1

// debugServerConnections controls whether all server connections are wrapped
// with a verbose logging wrapper.
const debugServerConnections = false

// Create new connection from rwc.
func (srv *Server) newConn(rwc net.Conn) (c *conn, err error) {
	c = new(conn)
	c.remoteAddr = rwc.RemoteAddr().String()
	c.server = srv
	c.rwc = rwc
	if debugServerConnections {
		c.rwc = newLoggingConn("server", c.rwc)
	}
	c.sr = liveSwitchReader{r: c.rwc}
	c.lr = io.LimitReader(&c.sr, noLimit).(*io.LimitedReader)
	br := newBufioReader(c.lr)
	bw := newBufioWriterSize(c.rwc, 4<<10)
	c.buf = bufio.NewReadWriter(br, bw)
	return c, nil
}

// TODO: use a sync.Cache instead
var (
	bufioReaderCache   = make(chan *bufio.Reader, 4)
	bufioWriterCache2k = make(chan *bufio.Writer, 4)
	bufioWriterCache4k = make(chan *bufio.Writer, 4)
)

func bufioWriterCache(size int) chan *bufio.Writer {
	switch size {
	case 2 << 10:
		return bufioWriterCache2k
	case 4 << 10:
		return bufioWriterCache4k
	}
	return nil
}

func newBufioReader(r io.Reader) *bufio.Reader {
	select {
	case p := <-bufioReaderCache:
		p.Reset(r)
		return p
	default:
		return bufio.NewReader(r)
	}
}

func putBufioReader(br *bufio.Reader) {
	br.Reset(nil)
	select {
	case bufioReaderCache <- br:
	default:
	}
}

func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
	select {
	case p := <-bufioWriterCache(size):
		p.Reset(w)
		return p
	default:
		return bufio.NewWriterSize(w, size)
	}
}

func putBufioWriter(bw *bufio.Writer) {
	bw.Reset(nil)
	select {
	case bufioWriterCache(bw.Available()) <- bw:
	default:
	}
}

// DefaultMaxHeaderBytes is the maximum permitted size of the headers
// in an HTTP request.
// This can be overridden by setting Server.MaxHeaderBytes.
const DefaultMaxHeaderBytes = 1 << 20 // 1 MB

func (srv *Server) maxHeaderBytes() int {
	if srv.MaxHeaderBytes > 0 {
		return srv.MaxHeaderBytes
	}
	return DefaultMaxHeaderBytes
}

// wrapper around io.ReadCloser which, on first read, sends an
// HTTP/1.1 100 Continue header
type expectContinueReader struct {
	resp       *response
	readCloser io.ReadCloser
	closed     bool
}

func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
	if ecr.closed {
		return 0, ErrBodyReadAfterClose
	}
	if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() {
		ecr.resp.wroteContinue = true
		ecr.resp.conn.buf.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
		ecr.resp.conn.buf.Flush()
	}
	return ecr.readCloser.Read(p)
}

func (ecr *expectContinueReader) Close() error {
	ecr.closed = true
	return ecr.readCloser.Close()
}

// TimeFormat is the time format to use with
// time.Parse and time.Time.Format when parsing
// or generating times in HTTP headers.
// It is like time.RFC1123 but hard codes GMT as the time zone.
const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"

// appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat))
func appendTime(b []byte, t time.Time) []byte {
	const days = "SunMonTueWedThuFriSat"
	const months = "JanFebMarAprMayJunJulAugSepOctNovDec"

	t = t.UTC()
	yy, mm, dd := t.Date()
	hh, mn, ss := t.Clock()
	day := days[3*t.Weekday():]
	mon := months[3*(mm-1):]

	return append(b,
		day[0], day[1], day[2], ',', ' ',
		byte('0'+dd/10), byte('0'+dd%10), ' ',
		mon[0], mon[1], mon[2], ' ',
		byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ',
		byte('0'+hh/10), byte('0'+hh%10), ':',
		byte('0'+mn/10), byte('0'+mn%10), ':',
		byte('0'+ss/10), byte('0'+ss%10), ' ',
		'G', 'M', 'T')
}

var errTooLarge = errors.New("http: request too large")

// Read next request from connection.
func (c *conn) readRequest() (w *response, err error) {
	if c.hijacked() {
		return nil, ErrHijacked
	}

	if d := c.server.ReadTimeout; d != 0 {
		c.rwc.SetReadDeadline(time.Now().Add(d))
	}
	if d := c.server.WriteTimeout; d != 0 {
		defer func() {
			c.rwc.SetWriteDeadline(time.Now().Add(d))
		}()
	}

	c.lr.N = int64(c.server.maxHeaderBytes()) + 4096 /* bufio slop */
	var req *Request
	if req, err = ReadRequest(c.buf.Reader); err != nil {
		if c.lr.N == 0 {
			return nil, errTooLarge
		}
		return nil, err
	}
	c.lr.N = noLimit

	req.RemoteAddr = c.remoteAddr
	req.TLS = c.tlsState

	w = &response{
		conn:          c,
		req:           req,
		handlerHeader: make(Header),
		contentLength: -1,
	}
	w.cw.res = w
	w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
	return w, nil
}

func (w *response) Header() Header {
	if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
		// Accessing the header between logically writing it
		// and physically writing it means we need to allocate
		// a clone to snapshot the logically written state.
		w.cw.header = w.handlerHeader.clone()
	}
	w.calledHeader = true
	return w.handlerHeader
}

// maxPostHandlerReadBytes is the max number of Request.Body bytes not
// consumed by a handler that the server will read from the client
// in order to keep a connection alive.  If there are more bytes than
// this, the server, to be paranoid, instead sends a "Connection:
// close" response.
//
// This number is approximately what a typical machine's TCP buffer
// size is anyway.  (if we have the bytes on the machine, we might as
// well read them)
const maxPostHandlerReadBytes = 256 << 10

func (w *response) WriteHeader(code int) {
	if w.conn.hijacked() {
		log.Print("http: response.WriteHeader on hijacked connection")
		return
	}
	if w.wroteHeader {
		log.Print("http: multiple response.WriteHeader calls")
		return
	}
	w.wroteHeader = true
	w.status = code

	if w.calledHeader && w.cw.header == nil {
		w.cw.header = w.handlerHeader.clone()
	}

	if cl := w.handlerHeader.get("Content-Length"); cl != "" {
		v, err := strconv.ParseInt(cl, 10, 64)
		if err == nil && v >= 0 {
			w.contentLength = v
		} else {
			log.Printf("http: invalid Content-Length of %q", cl)
			w.handlerHeader.Del("Content-Length")
		}
	}
}

// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
// This type is used to avoid extra allocations from cloning and/or populating
// the response Header map and all its 1-element slices.
type extraHeader struct {
	contentType      string
	connection       string
	transferEncoding string
	date             []byte // written if not nil
	contentLength    []byte // written if not nil
}

// Sorted the same as extraHeader.Write's loop.
var extraHeaderKeys = [][]byte{
	[]byte("Content-Type"),
	[]byte("Connection"),
	[]byte("Transfer-Encoding"),
}

var (
	headerContentLength = []byte("Content-Length: ")
	headerDate          = []byte("Date: ")
)

// Write writes the headers described in h to w.
//
// This method has a value receiver, despite the somewhat large size
// of h, because it prevents an allocation. The escape analysis isn't
// smart enough to realize this function doesn't mutate h.
func (h extraHeader) Write(w *bufio.Writer) {
	if h.date != nil {
		w.Write(headerDate)
		w.Write(h.date)
		w.Write(crlf)
	}
	if h.contentLength != nil {
		w.Write(headerContentLength)
		w.Write(h.contentLength)
		w.Write(crlf)
	}
	for i, v := range []string{h.contentType, h.connection, h.transferEncoding} {
		if v != "" {
			w.Write(extraHeaderKeys[i])
			w.Write(colonSpace)
			w.WriteString(v)
			w.Write(crlf)
		}
	}
}

// writeHeader finalizes the header sent to the client and writes it
// to cw.res.conn.buf.
//
// p is not written by writeHeader, but is the first chunk of the body
// that will be written.  It is sniffed for a Content-Type if none is
// set explicitly.  It's also used to set the Content-Length, if the
// total body size was small and the handler has already finished
// running.
func (cw *chunkWriter) writeHeader(p []byte) {
	if cw.wroteHeader {
		return
	}
	cw.wroteHeader = true

	w := cw.res
	isHEAD := w.req.Method == "HEAD"

	// header is written out to w.conn.buf below. Depending on the
	// state of the handler, we either own the map or not. If we
	// don't own it, the exclude map is created lazily for
	// WriteSubset to remove headers. The setHeader struct holds
	// headers we need to add.
	header := cw.header
	owned := header != nil
	if !owned {
		header = w.handlerHeader
	}
	var excludeHeader map[string]bool
	delHeader := func(key string) {
		if owned {
			header.Del(key)
			return
		}
		if _, ok := header[key]; !ok {
			return
		}
		if excludeHeader == nil {
			excludeHeader = make(map[string]bool)
		}
		excludeHeader[key] = true
	}
	var setHeader extraHeader

	// If the handler is done but never sent a Content-Length
	// response header and this is our first (and last) write, set
	// it, even to zero. This helps HTTP/1.0 clients keep their
	// "keep-alive" connections alive.
	// Exceptions: 304 responses never get Content-Length, and if
	// it was a HEAD request, we don't know the difference between
	// 0 actual bytes and 0 bytes because the handler noticed it
	// was a HEAD request and chose not to write anything.  So for
	// HEAD, the handler should either write the Content-Length or
	// write non-zero bytes.  If it's actually 0 bytes and the
	// handler never looked at the Request.Method, we just don't
	// send a Content-Length header.
	if w.handlerDone && w.status != StatusNotModified && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
		w.contentLength = int64(len(p))
		setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
	}

	// If this was an HTTP/1.0 request with keep-alive and we sent a
	// Content-Length back, we can make this a keep-alive response ...
	if w.req.wantsHttp10KeepAlive() {
		sentLength := header.get("Content-Length") != ""
		if sentLength && header.get("Connection") == "keep-alive" {
			w.closeAfterReply = false
		}
	}

	// Check for an explicit (and valid) Content-Length header.
	hasCL := w.contentLength != -1

	if w.req.wantsHttp10KeepAlive() && (isHEAD || hasCL) {
		_, connectionHeaderSet := header["Connection"]
		if !connectionHeaderSet {
			setHeader.connection = "keep-alive"
		}
	} else if !w.req.ProtoAtLeast(1, 1) || w.req.wantsClose() {
		w.closeAfterReply = true
	}

	if header.get("Connection") == "close" {
		w.closeAfterReply = true
	}

	// Per RFC 2616, we should consume the request body before
	// replying, if the handler hasn't already done so.  But we
	// don't want to do an unbounded amount of reading here for
	// DoS reasons, so we only try up to a threshold.
	if w.req.ContentLength != 0 && !w.closeAfterReply {
		ecr, isExpecter := w.req.Body.(*expectContinueReader)
		if !isExpecter || ecr.resp.wroteContinue {
			n, _ := io.CopyN(ioutil.Discard, w.req.Body, maxPostHandlerReadBytes+1)
			if n >= maxPostHandlerReadBytes {
				w.requestTooLarge()
				delHeader("Connection")
				setHeader.connection = "close"
			} else {
				w.req.Body.Close()
			}
		}
	}

	code := w.status
	if code == StatusNotModified {
		// Must not have body.
		// RFC 2616 section 10.3.5: "the response MUST NOT include other entity-headers"
		for _, k := range []string{"Content-Type", "Content-Length", "Transfer-Encoding"} {
			delHeader(k)
		}
	} else {
		// If no content type, apply sniffing algorithm to body.
		_, haveType := header["Content-Type"]
		if !haveType {
			setHeader.contentType = DetectContentType(p)
		}
	}

	if _, ok := header["Date"]; !ok {
		setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
	}

	te := header.get("Transfer-Encoding")
	hasTE := te != ""
	if hasCL && hasTE && te != "identity" {
		// TODO: return an error if WriteHeader gets a return parameter
		// For now just ignore the Content-Length.
		log.Printf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
			te, w.contentLength)
		delHeader("Content-Length")
		hasCL = false
	}

	if w.req.Method == "HEAD" || code == StatusNotModified {
		// do nothing
	} else if code == StatusNoContent {
		delHeader("Transfer-Encoding")
	} else if hasCL {
		delHeader("Transfer-Encoding")
	} else if w.req.ProtoAtLeast(1, 1) {
		// HTTP/1.1 or greater: use chunked transfer encoding
		// to avoid closing the connection at EOF.
		// TODO: this blows away any custom or stacked Transfer-Encoding they
		// might have set.  Deal with that as need arises once we have a valid
		// use case.
		cw.chunking = true
		setHeader.transferEncoding = "chunked"
	} else {
		// HTTP version < 1.1: cannot do chunked transfer
		// encoding and we don't know the Content-Length so
		// signal EOF by closing connection.
		w.closeAfterReply = true
		delHeader("Transfer-Encoding") // in case already set
	}

	// Cannot use Content-Length with non-identity Transfer-Encoding.
	if cw.chunking {
		delHeader("Content-Length")
	}
	if !w.req.ProtoAtLeast(1, 0) {
		return
	}

	if w.closeAfterReply && !hasToken(cw.header.get("Connection"), "close") {
		delHeader("Connection")
		if w.req.ProtoAtLeast(1, 1) {
			setHeader.connection = "close"
		}
	}

	w.conn.buf.WriteString(statusLine(w.req, code))
	cw.header.WriteSubset(w.conn.buf, excludeHeader)
	setHeader.Write(w.conn.buf.Writer)
	w.conn.buf.Write(crlf)
}

// statusLines is a cache of Status-Line strings, keyed by code (for
// HTTP/1.1) or negative code (for HTTP/1.0). This is faster than a
// map keyed by struct of two fields. This map's max size is bounded
// by 2*len(statusText), two protocol types for each known official
// status code in the statusText map.
var (
	statusMu    sync.RWMutex
	statusLines = make(map[int]string)
)

// statusLine returns a response Status-Line (RFC 2616 Section 6.1)
// for the given request and response status code.
func statusLine(req *Request, code int) string {
	// Fast path:
	key := code
	proto11 := req.ProtoAtLeast(1, 1)
	if !proto11 {
		key = -key
	}
	statusMu.RLock()
	line, ok := statusLines[key]
	statusMu.RUnlock()
	if ok {
		return line
	}

	// Slow path:
	proto := "HTTP/1.0"
	if proto11 {
		proto = "HTTP/1.1"
	}
	codestring := strconv.Itoa(code)
	text, ok := statusText[code]
	if !ok {
		text = "status code " + codestring
	}
	line = proto + " " + codestring + " " + text + "\r\n"
	if ok {
		statusMu.Lock()
		defer statusMu.Unlock()
		statusLines[key] = line
	}
	return line
}

// bodyAllowed returns true if a Write is allowed for this response type.
// It's illegal to call this before the header has been flushed.
func (w *response) bodyAllowed() bool {
	if !w.wroteHeader {
		panic("")
	}
	return w.status != StatusNotModified
}

// The Life Of A Write is like this:
//
// Handler starts. No header has been sent. The handler can either
// write a header, or just start writing.  Writing before sending a header
// sends an implicitly empty 200 OK header.
//
// If the handler didn't declare a Content-Length up front, we either
// go into chunking mode or, if the handler finishes running before
// the chunking buffer size, we compute a Content-Length and send that
// in the header instead.
//
// Likewise, if the handler didn't set a Content-Type, we sniff that
// from the initial chunk of output.
//
// The Writers are wired together like:
//
// 1. *response (the ResponseWriter) ->
// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes
// 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type)
//    and which writes the chunk headers, if needed.
// 4. conn.buf, a bufio.Writer of default (4kB) bytes
// 5. the rwc, the net.Conn.
//
// TODO(bradfitz): short-circuit some of the buffering when the
// initial header contains both a Content-Type and Content-Length.
// Also short-circuit in (1) when the header's been sent and not in
// chunking mode, writing directly to (4) instead, if (2) has no
// buffered data.  More generally, we could short-circuit from (1) to
// (3) even in chunking mode if the write size from (1) is over some
// threshold and nothing is in (2).  The answer might be mostly making
// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal
// with this instead.
func (w *response) Write(data []byte) (n int, err error) {
	return w.write(len(data), data, "")
}

func (w *response) WriteString(data string) (n int, err error) {
	return w.write(len(data), nil, data)
}

// either dataB or dataS is non-zero.
func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) {
	if w.conn.hijacked() {
		log.Print("http: response.Write on hijacked connection")
		return 0, ErrHijacked
	}
	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}
	if lenData == 0 {
		return 0, nil
	}
	if !w.bodyAllowed() {
		return 0, ErrBodyNotAllowed
	}

	w.written += int64(lenData) // ignoring errors, for errorKludge
	if w.contentLength != -1 && w.written > w.contentLength {
		return 0, ErrContentLength
	}
	if dataB != nil {
		return w.w.Write(dataB)
	} else {
		return w.w.WriteString(dataS)
	}
}

func (w *response) finishRequest() {
	w.handlerDone = true

	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}

	w.w.Flush()
	putBufioWriter(w.w)
	w.cw.close()
	w.conn.buf.Flush()

	// Close the body, unless we're about to close the whole TCP connection
	// anyway.
	if !w.closeAfterReply {
		w.req.Body.Close()
	}
	if w.req.MultipartForm != nil {
		w.req.MultipartForm.RemoveAll()
	}

	if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
		// Did not write enough. Avoid getting out of sync.
		w.closeAfterReply = true
	}
}

func (w *response) Flush() {
	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}
	w.w.Flush()
	w.cw.flush()
}

func (c *conn) finalFlush() {
	if c.buf != nil {
		c.buf.Flush()

		// Steal the bufio.Reader (~4KB worth of memory) and its associated
		// reader for a future connection.
		putBufioReader(c.buf.Reader)

		// Steal the bufio.Writer (~4KB worth of memory) and its associated
		// writer for a future connection.
		putBufioWriter(c.buf.Writer)

		c.buf = nil
	}
}

// Close the connection.
func (c *conn) close() {
	c.finalFlush()
	if c.rwc != nil {
		c.rwc.Close()
		c.rwc = nil
	}
}

// rstAvoidanceDelay is the amount of time we sleep after closing the
// write side of a TCP connection before closing the entire socket.
// By sleeping, we increase the chances that the client sees our FIN
// and processes its final data before they process the subsequent RST
// from closing a connection with known unread data.
// This RST seems to occur mostly on BSD systems. (And Windows?)
// This timeout is somewhat arbitrary (~latency around the planet).
const rstAvoidanceDelay = 500 * time.Millisecond

// closeWrite flushes any outstanding data and sends a FIN packet (if
// client is connected via TCP), signalling that we're done.  We then
// pause for a bit, hoping the client processes it before any
// subsequent RST.
//
// See http://golang.org/issue/3595
func (c *conn) closeWriteAndWait() {
	c.finalFlush()
	if tcp, ok := c.rwc.(*net.TCPConn); ok {
		tcp.CloseWrite()
	}
	time.Sleep(rstAvoidanceDelay)
}

// validNPN reports whether the proto is not a blacklisted Next
// Protocol Negotiation protocol.  Empty and built-in protocol types
// are blacklisted and can't be overridden with alternate
// implementations.
func validNPN(proto string) bool {
	switch proto {
	case "", "http/1.1", "http/1.0":
		return false
	}
	return true
}

// Serve a new connection.
func (c *conn) serve() {
	defer func() {
		if err := recover(); err != nil {
			const size = 4096
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			log.Printf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
		}
		if !c.hijacked() {
			c.close()
		}
	}()

	if tlsConn, ok := c.rwc.(*tls.Conn); ok {
		if d := c.server.ReadTimeout; d != 0 {
			c.rwc.SetReadDeadline(time.Now().Add(d))
		}
		if d := c.server.WriteTimeout; d != 0 {
			c.rwc.SetWriteDeadline(time.Now().Add(d))
		}
		if err := tlsConn.Handshake(); err != nil {
			return
		}
		c.tlsState = new(tls.ConnectionState)
		*c.tlsState = tlsConn.ConnectionState()
		if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) {
			if fn := c.server.TLSNextProto[proto]; fn != nil {
				h := initNPNRequest{tlsConn, serverHandler{c.server}}
				fn(c.server, tlsConn, h)
			}
			return
		}
	}

	for {
		w, err := c.readRequest()
		if err != nil {
			if err == errTooLarge {
				// Their HTTP client may or may not be
				// able to read this if we're
				// responding to them and hanging up
				// while they're still writing their
				// request.  Undefined behavior.
				io.WriteString(c.rwc, "HTTP/1.1 413 Request Entity Too Large\r\n\r\n")
				c.closeWriteAndWait()
				break
			} else if err == io.EOF {
				break // Don't reply
			} else if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
				break // Don't reply
			}
			io.WriteString(c.rwc, "HTTP/1.1 400 Bad Request\r\n\r\n")
			break
		}

		// Expect 100 Continue support
		req := w.req
		if req.expectsContinue() {
			if req.ProtoAtLeast(1, 1) {
				// Wrap the Body reader with one that replies on the connection
				req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
			}
			if req.ContentLength == 0 {
				w.Header().Set("Connection", "close")
				w.WriteHeader(StatusBadRequest)
				w.finishRequest()
				break
			}
			req.Header.Del("Expect")
		} else if req.Header.get("Expect") != "" {
			w.sendExpectationFailed()
			break
		}

		// HTTP cannot have multiple simultaneous active requests.[*]
		// Until the server replies to this request, it can't read another,
		// so we might as well run the handler in this goroutine.
		// [*] Not strictly true: HTTP pipelining.  We could let them all process
		// in parallel even if their responses need to be serialized.
		serverHandler{c.server}.ServeHTTP(w, w.req)
		if c.hijacked() {
			return
		}
		w.finishRequest()
		if w.closeAfterReply {
			if w.requestBodyLimitHit {
				c.closeWriteAndWait()
			}
			break
		}
	}
}

func (w *response) sendExpectationFailed() {
	// TODO(bradfitz): let ServeHTTP handlers handle
	// requests with non-standard expectation[s]? Seems
	// theoretical at best, and doesn't fit into the
	// current ServeHTTP model anyway.  We'd need to
	// make the ResponseWriter an optional
	// "ExpectReplier" interface or something.
	//
	// For now we'll just obey RFC 2616 14.20 which says
	// "If a server receives a request containing an
	// Expect field that includes an expectation-
	// extension that it does not support, it MUST
	// respond with a 417 (Expectation Failed) status."
	w.Header().Set("Connection", "close")
	w.WriteHeader(StatusExpectationFailed)
	w.finishRequest()
}

// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter
// and a Hijacker.
func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
	if w.wroteHeader {
		w.cw.flush()
	}
	return w.conn.hijack()
}

func (w *response) CloseNotify() <-chan bool {
	return w.conn.closeNotify()
}

// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as HTTP handlers.  If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler object that calls f.
type HandlerFunc func(ResponseWriter, *Request)

// ServeHTTP calls f(w, r).
func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) {
	f(w, r)
}
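
// As an illustration, an ordinary function adapted into a Handler;
// hello is a hypothetical name:
//
//	func hello(w http.ResponseWriter, r *http.Request) {
//		io.WriteString(w, "hello\n")
//	}
//
//	var h http.Handler = http.HandlerFunc(hello)
//	// h can now be registered like any other Handler, for example:
//	// http.Handle("/hello", h)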

// Helper handlers

// Error replies to the request with the specified error message and HTTP code.
// The error message should be plain text.
func Error(w ResponseWriter, error string, code int) {
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	w.WriteHeader(code)
	fmt.Fprintln(w, error)
}

// NotFound replies to the request with an HTTP 404 not found error.
func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) }

// NotFoundHandler returns a simple request handler
// that replies to each request with a ``404 page not found'' reply.
func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
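
// As an illustration, a minimal sketch of Error inside a handler;
// adminOnly and the X-Admin header check are hypothetical:
//
//	func adminOnly(w http.ResponseWriter, r *http.Request) {
//		if r.Header.Get("X-Admin") == "" {
//			http.Error(w, "403 forbidden", http.StatusForbidden)
//			return
//		}
//		io.WriteString(w, "welcome, admin\n")
//	}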

// StripPrefix returns a handler that serves HTTP requests
// by removing the given prefix from the request URL's Path
// and invoking the handler h. StripPrefix handles a
// request for a path that doesn't begin with prefix by
// replying with an HTTP 404 not found error.
func StripPrefix(prefix string, h Handler) Handler {
	if prefix == "" {
		return h
	}
	return HandlerFunc(func(w ResponseWriter, r *Request) {
		if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) {
			r.URL.Path = p
			h.ServeHTTP(w, r)
		} else {
			NotFound(w, r)
		}
	})
}
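
// As an illustration, a typical use of StripPrefix: serving the files under
// a local directory at a different URL prefix (the paths are examples):
//
//	fs := http.FileServer(http.Dir("/var/www/static"))
//	http.Handle("/static/", http.StripPrefix("/static/", fs))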

// Redirect replies to the request with a redirect to url,
// which may be a path relative to the request path.
func Redirect(w ResponseWriter, r *Request, urlStr string, code int) {
	if u, err := url.Parse(urlStr); err == nil {
		// If url was relative, make absolute by
		// combining with request path.
		// The browser would probably do this for us,
		// but doing it ourselves is more reliable.

		// NOTE(rsc): RFC 2616 says that the Location
		// line must be an absolute URI, like
		// "http://www.google.com/redirect/",
		// not a path like "/redirect/".
		// Unfortunately, we don't know what to
		// put in the host name section to get the
		// client to connect to us again, so we can't
		// know the right absolute URI to send back.
		// Because of this problem, no one pays attention
		// to the RFC; they all send back just a new path.
		// So do we.
		oldpath := r.URL.Path
		if oldpath == "" { // should not happen, but avoid a crash if it does
			oldpath = "/"
		}
		if u.Scheme == "" {
			// no leading http://server
			if urlStr == "" || urlStr[0] != '/' {
				// make relative path absolute
				olddir, _ := path.Split(oldpath)
				urlStr = olddir + urlStr
			}

			var query string
			if i := strings.Index(urlStr, "?"); i != -1 {
				urlStr, query = urlStr[:i], urlStr[i:]
			}

			// clean up but preserve trailing slash
			trailing := strings.HasSuffix(urlStr, "/")
			urlStr = path.Clean(urlStr)
			if trailing && !strings.HasSuffix(urlStr, "/") {
				urlStr += "/"
			}
			urlStr += query
		}
	}

	w.Header().Set("Location", urlStr)
	w.WriteHeader(code)

	// RFC2616 recommends that a short note "SHOULD" be included in the
	// response because older user agents may not understand 301/307.
	// Shouldn't send the response for POST or HEAD; that leaves GET.
	if r.Method == "GET" {
		note := "<a href=\"" + htmlEscape(urlStr) + "\">" + statusText[code] + "</a>.\n"
		fmt.Fprintln(w, note)
	}
}
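
// As an illustration, a handler that redirects an old path to a new one;
// the paths are examples:
//
//	func oldPath(w http.ResponseWriter, r *http.Request) {
//		http.Redirect(w, r, "/new-path", http.StatusMovedPermanently)
//	}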

var htmlReplacer = strings.NewReplacer(
	"&", "&amp;",
	"<", "&lt;",
	">", "&gt;",
	// "&#34;" is shorter than "&quot;".
	`"`, "&#34;",
	// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
	"'", "&#39;",
)

func htmlEscape(s string) string {
	return htmlReplacer.Replace(s)
}

// Redirect to a fixed URL
type redirectHandler struct {
	url  string
	code int
}

func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
	Redirect(w, r, rh.url, rh.code)
}

// RedirectHandler returns a request handler that redirects
// each request it receives to the given url using the given
// status code.
func RedirectHandler(url string, code int) Handler {
	return &redirectHandler{url, code}
}

// ServeMux is an HTTP request multiplexer.
// It matches the URL of each incoming request against a list of registered
// patterns and calls the handler for the pattern that
// most closely matches the URL.
//
// Patterns name fixed, rooted paths, like "/favicon.ico",
// or rooted subtrees, like "/images/" (note the trailing slash).
// Longer patterns take precedence over shorter ones, so that
// if there are handlers registered for both "/images/"
// and "/images/thumbnails/", the latter handler will be
// called for paths beginning "/images/thumbnails/" and the
// former will receive requests for any other paths in the
// "/images/" subtree.
//
// Note that since a pattern ending in a slash names a rooted subtree,
// the pattern "/" matches all paths not matched by other registered
// patterns, not just the URL with Path == "/".
//
// Patterns may optionally begin with a host name, restricting matches to
// URLs on that host only.  Host-specific patterns take precedence over
// general patterns, so that a handler might register for the two patterns
// "/codesearch" and "codesearch.google.com/" without also taking over
// requests for "http://www.google.com/".
//
// ServeMux also takes care of sanitizing the URL request path,
// redirecting any request containing . or .. elements to an
// equivalent .- and ..-free URL.
type ServeMux struct {
	mu    sync.RWMutex
	m     map[string]muxEntry
	hosts bool // whether any patterns contain hostnames
}

type muxEntry struct {
	explicit bool
	h        Handler
	pattern  string
}

// NewServeMux allocates and returns a new ServeMux.
func NewServeMux() *ServeMux { return &ServeMux{m: make(map[string]muxEntry)} }

// DefaultServeMux is the default ServeMux used by Serve.
var DefaultServeMux = NewServeMux()
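
// As an illustration, a sketch of the pattern rules described above;
// the handlers named here are hypothetical:
//
//	mux := http.NewServeMux()
//	mux.Handle("/images/", imagesHandler)           // rooted subtree
//	mux.Handle("/images/thumbnails/", thumbHandler) // longer pattern wins within its subtree
//	mux.HandleFunc("/favicon.ico", serveFavicon)    // fixed, rooted path
//	log.Fatal(http.ListenAndServe(":8080", mux))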

// Does path match pattern?
func pathMatch(pattern, path string) bool {
	if len(pattern) == 0 {
		// should not happen
		return false
	}
	n := len(pattern)
	if pattern[n-1] != '/' {
		return pattern == path
	}
	return len(path) >= n && path[0:n] == pattern
}

// Return the canonical path for p, eliminating . and .. elements.
func cleanPath(p string) string {
	if p == "" {
		return "/"
	}
	if p[0] != '/' {
		p = "/" + p
	}
	np := path.Clean(p)
	// path.Clean removes trailing slash except for root;
	// put the trailing slash back if necessary.
	if p[len(p)-1] == '/' && np != "/" {
		np += "/"
	}
	return np
}

// Find a handler on a handler map given a path string
// Most-specific (longest) pattern wins
func (mux *ServeMux) match(path string) (h Handler, pattern string) {
	var n = 0
	for k, v := range mux.m {
		if !pathMatch(k, path) {
			continue
		}
		if h == nil || len(k) > n {
			n = len(k)
			h = v.h
			pattern = v.pattern
		}
	}
	return
}

// Handler returns the handler to use for the given request,
// consulting r.Method, r.Host, and r.URL.Path. It always returns
// a non-nil handler. If the path is not in its canonical form, the
// handler will be an internally-generated handler that redirects
// to the canonical path.
//
// Handler also returns the registered pattern that matches the
// request or, in the case of internally-generated redirects,
// the pattern that will match after following the redirect.
//
// If there is no registered handler that applies to the request,
// Handler returns a ``page not found'' handler and an empty pattern.
func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {
	if r.Method != "CONNECT" {
		if p := cleanPath(r.URL.Path); p != r.URL.Path {
			_, pattern = mux.handler(r.Host, p)
			url := *r.URL
			url.Path = p
			return RedirectHandler(url.String(), StatusMovedPermanently), pattern
		}
	}

	return mux.handler(r.Host, r.URL.Path)
}

// handler is the main implementation of Handler.
// The path is known to be in canonical form, except for CONNECT methods.
func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) {
	mux.mu.RLock()
	defer mux.mu.RUnlock()

	// Host-specific pattern takes precedence over generic ones
	if mux.hosts {
		h, pattern = mux.match(host + path)
	}
	if h == nil {
		h, pattern = mux.match(path)
	}
	if h == nil {
		h, pattern = NotFoundHandler(), ""
	}
	return
}

// ServeHTTP dispatches the request to the handler whose
// pattern most closely matches the request URL.
func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
	if r.RequestURI == "*" {
		if r.ProtoAtLeast(1, 1) {
			w.Header().Set("Connection", "close")
		}
		w.WriteHeader(StatusBadRequest)
		return
	}
	h, _ := mux.Handler(r)
	h.ServeHTTP(w, r)
}

// Handle registers the handler for the given pattern.
// If a handler already exists for pattern, Handle panics.
func (mux *ServeMux) Handle(pattern string, handler Handler) {
	mux.mu.Lock()
	defer mux.mu.Unlock()

	if pattern == "" {
		panic("http: invalid pattern " + pattern)
	}
	if handler == nil {
		panic("http: nil handler")
	}
	if mux.m[pattern].explicit {
		panic("http: multiple registrations for " + pattern)
	}

	mux.m[pattern] = muxEntry{explicit: true, h: handler, pattern: pattern}

	if pattern[0] != '/' {
		mux.hosts = true
	}

	// Helpful behavior:
	// If pattern is /tree/, insert an implicit permanent redirect for /tree.
	// It can be overridden by an explicit registration.
	n := len(pattern)
	if n > 0 && pattern[n-1] == '/' && !mux.m[pattern[0:n-1]].explicit {
		// If pattern contains a host name, strip it and use remaining
		// path for redirect.
		path := pattern
		if pattern[0] != '/' {
			// In pattern, at least the last character is a '/', so
			// strings.Index can't be -1.
			path = pattern[strings.Index(pattern, "/"):]
		}
		mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(path, StatusMovedPermanently), pattern: pattern}
	}
}

// HandleFunc registers the handler function for the given pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
	mux.Handle(pattern, HandlerFunc(handler))
}

// Handle registers the handler for the given pattern
// in the DefaultServeMux.
// The documentation for ServeMux explains how patterns are matched.
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }

// HandleFunc registers the handler function for the given pattern
// in the DefaultServeMux.
// The documentation for ServeMux explains how patterns are matched.
func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
	DefaultServeMux.HandleFunc(pattern, handler)
}

// Serve accepts incoming HTTP connections on the listener l,
// creating a new service goroutine for each.  The service goroutines
// read requests and then call handler to reply to them.
// Handler is typically nil, in which case the DefaultServeMux is used.
func Serve(l net.Listener, handler Handler) error {
	srv := &Server{Handler: handler}
	return srv.Serve(l)
}

// A Server defines parameters for running an HTTP server.
type Server struct {
	Addr           string        // TCP address to listen on, ":http" if empty
	Handler        Handler       // handler to invoke, http.DefaultServeMux if nil
	ReadTimeout    time.Duration // maximum duration before timing out read of the request
	WriteTimeout   time.Duration // maximum duration before timing out write of the response
	MaxHeaderBytes int           // maximum size of request headers, DefaultMaxHeaderBytes if 0
	TLSConfig      *tls.Config   // optional TLS config, used by ListenAndServeTLS

	// TLSNextProto optionally specifies a function to take over
	// ownership of the provided TLS connection when an NPN
	// protocol upgrade has occurred.  The map key is the protocol
	// name negotiated. The Handler argument should be used to
	// handle HTTP requests and will initialize the Request's TLS
	// and RemoteAddr if not already set.  The connection is
	// automatically closed when the function returns.
	TLSNextProto map[string]func(*Server, *tls.Conn, Handler)
}
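
// As an illustration, a minimal sketch of configuring and starting a
// Server explicitly; myHandler and the values shown are examples:
//
//	s := &http.Server{
//		Addr:           ":8080",
//		Handler:        myHandler,
//		ReadTimeout:    10 * time.Second,
//		WriteTimeout:   10 * time.Second,
//		MaxHeaderBytes: 1 << 20,
//	}
//	log.Fatal(s.ListenAndServe())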

// serverHandler delegates to either the server's Handler or
// DefaultServeMux and also handles "OPTIONS *" requests.
type serverHandler struct {
	srv *Server
}

func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) {
	handler := sh.srv.Handler
	if handler == nil {
		handler = DefaultServeMux
	}
	if req.RequestURI == "*" && req.Method == "OPTIONS" {
		handler = globalOptionsHandler{}
	}
	handler.ServeHTTP(rw, req)
}

// ListenAndServe listens on the TCP network address srv.Addr and then
// calls Serve to handle requests on incoming connections.  If
// srv.Addr is blank, ":http" is used.
func (srv *Server) ListenAndServe() error {
	addr := srv.Addr
	if addr == "" {
		addr = ":http"
	}
	l, e := net.Listen("tcp", addr)
	if e != nil {
		return e
	}
	return srv.Serve(l)
}

// Serve accepts incoming connections on the Listener l, creating a
// new service goroutine for each.  The service goroutines read requests and
// then call srv.Handler to reply to them.
func (srv *Server) Serve(l net.Listener) error {
	defer l.Close()
	var tempDelay time.Duration // how long to sleep on accept failure
	for {
		rw, e := l.Accept()
		if e != nil {
			if ne, ok := e.(net.Error); ok && ne.Temporary() {
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay *= 2
				}
				if max := 1 * time.Second; tempDelay > max {
					tempDelay = max
				}
				log.Printf("http: Accept error: %v; retrying in %v", e, tempDelay)
				time.Sleep(tempDelay)
				continue
			}
			return e
		}
		tempDelay = 0
		c, err := srv.newConn(rw)
		if err != nil {
			continue
		}
		go c.serve()
	}
}

// ListenAndServe listens on the TCP network address addr
// and then calls Serve with handler to handle requests
// on incoming connections.  Handler is typically nil,
// in which case the DefaultServeMux is used.
//
// A trivial example server is:
//
//	package main
//
//	import (
//		"io"
//		"net/http"
//		"log"
//	)
//
//	// hello world, the web server
//	func HelloServer(w http.ResponseWriter, req *http.Request) {
//		io.WriteString(w, "hello, world!\n")
//	}
//
//	func main() {
//		http.HandleFunc("/hello", HelloServer)
//		err := http.ListenAndServe(":12345", nil)
//		if err != nil {
//			log.Fatal("ListenAndServe: ", err)
//		}
//	}
func ListenAndServe(addr string, handler Handler) error {
	server := &Server{Addr: addr, Handler: handler}
	return server.ListenAndServe()
}

// ListenAndServeTLS acts identically to ListenAndServe, except that it
// expects HTTPS connections. Additionally, files containing a certificate and
// matching private key for the server must be provided. If the certificate
// is signed by a certificate authority, the certFile should be the concatenation
// of the server's certificate followed by the CA's certificate.
//
// A trivial example server is:
//
//	import (
//		"log"
//		"net/http"
//	)
//
//	func handler(w http.ResponseWriter, req *http.Request) {
//		w.Header().Set("Content-Type", "text/plain")
//		w.Write([]byte("This is an example server.\n"))
//	}
//
//	func main() {
//		http.HandleFunc("/", handler)
//		log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/")
//		err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil)
//		if err != nil {
//			log.Fatal(err)
//		}
//	}
//
// One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem.
func ListenAndServeTLS(addr string, certFile string, keyFile string, handler Handler) error {
	server := &Server{Addr: addr, Handler: handler}
	return server.ListenAndServeTLS(certFile, keyFile)
}

// ListenAndServeTLS listens on the TCP network address srv.Addr and
// then calls Serve to handle requests on incoming TLS connections.
//
// Filenames containing a certificate and matching private key for
// the server must be provided. If the certificate is signed by a
// certificate authority, the certFile should be the concatenation
// of the server's certificate followed by the CA's certificate.
//
// If srv.Addr is blank, ":https" is used.
func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
	addr := srv.Addr
	if addr == "" {
		addr = ":https"
	}
	config := &tls.Config{}
	if srv.TLSConfig != nil {
		*config = *srv.TLSConfig
	}
	if config.NextProtos == nil {
		config.NextProtos = []string{"http/1.1"}
	}

	var err error
	config.Certificates = make([]tls.Certificate, 1)
	config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return err
	}

	conn, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}

	tlsListener := tls.NewListener(conn, config)
	return srv.Serve(tlsListener)
}
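
// A hedged sketch (illustrative only) of supplying a custom srv.TLSConfig
// before calling ListenAndServeTLS; mux and clientCAs are assumed variables:
//
//	srv := &http.Server{Addr: ":10443", Handler: mux}
//	srv.TLSConfig = &tls.Config{
//		ClientAuth: tls.RequireAndVerifyClientCert,
//		ClientCAs:  clientCAs,
//	}
//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))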

// TimeoutHandler returns a Handler that runs h with the given time limit.
//
// The new Handler calls h.ServeHTTP to handle each request, but if a
// call runs for longer than its time limit, the handler responds with
// a 503 Service Unavailable error and the given message in its body.
// (If msg is empty, a suitable default message will be sent.)
// After such a timeout, writes by h to its ResponseWriter will return
// ErrHandlerTimeout.
func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
	f := func() <-chan time.Time {
		return time.After(dt)
	}
	return &timeoutHandler{h, f, msg}
}
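
// A hedged usage sketch (not part of the original source); the /slow route,
// the five-second sleep, and the one-second limit are illustrative:
//
//	slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//		time.Sleep(5 * time.Second)
//		io.WriteString(w, "finally done\n")
//	})
//	http.Handle("/slow", http.TimeoutHandler(slow, 1*time.Second, "request timed out"))
//	log.Fatal(http.ListenAndServe(":8080", nil))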

// ErrHandlerTimeout is returned on ResponseWriter Write calls
// in handlers which have timed out.
var ErrHandlerTimeout = errors.New("http: Handler timeout")

type timeoutHandler struct {
	handler Handler
	timeout func() <-chan time.Time // returns channel producing a timeout
	body    string
}

func (h *timeoutHandler) errorBody() string {
	if h.body != "" {
		return h.body
	}
	return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
}

func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
	done := make(chan bool, 1)
	tw := &timeoutWriter{w: w}
	go func() {
		h.handler.ServeHTTP(tw, r)
		done <- true
	}()
	select {
	case <-done:
		return
	case <-h.timeout():
		tw.mu.Lock()
		defer tw.mu.Unlock()
		if !tw.wroteHeader {
			tw.w.WriteHeader(StatusServiceUnavailable)
			tw.w.Write([]byte(h.errorBody()))
		}
		tw.timedOut = true
	}
}

type timeoutWriter struct {
	w ResponseWriter

	mu          sync.Mutex
	timedOut    bool
	wroteHeader bool
}

func (tw *timeoutWriter) Header() Header {
	return tw.w.Header()
}

func (tw *timeoutWriter) Write(p []byte) (int, error) {
	tw.mu.Lock()
	timedOut := tw.timedOut
	tw.mu.Unlock()
	if timedOut {
		return 0, ErrHandlerTimeout
	}
	return tw.w.Write(p)
}

func (tw *timeoutWriter) WriteHeader(code int) {
	tw.mu.Lock()
	if tw.timedOut || tw.wroteHeader {
		tw.mu.Unlock()
		return
	}
	tw.wroteHeader = true
	tw.mu.Unlock()
	tw.w.WriteHeader(code)
}

// globalOptionsHandler responds to "OPTIONS *" requests.
type globalOptionsHandler struct{}

func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) {
	w.Header().Set("Content-Length", "0")
	if r.ContentLength != 0 {
		// Read up to 4KB of the OPTIONS body (which the spec
		// reserves for future use); anything beyond that is
		// treated as a waste of server resources (or an attack),
		// so we abort and close the connection, courtesy of
		// MaxBytesReader's EOF behavior.
		mb := MaxBytesReader(w, r.Body, 4<<10)
		io.Copy(ioutil.Discard, mb)
	}
}
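
// For reference, the request answered by this handler is the server-wide
// form of OPTIONS, which most clients never send; on the wire it looks
// like this (sketch, host name assumed):
//
//	OPTIONS * HTTP/1.1
//	Host: example.com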

// eofReader is a non-nil io.ReadCloser that always returns EOF.
// It embeds a *strings.Reader so it still has a WriteTo method
// and io.Copy won't need a buffer.
var eofReader = &struct {
	*strings.Reader
	io.Closer
}{
	strings.NewReader(""),
	ioutil.NopCloser(nil),
}

// initNPNRequest is an HTTP handler that initializes certain
// uninitialized fields in its *Request. Such partially-initialized
// Requests come from NPN protocol handlers.
type initNPNRequest struct {
	c *tls.Conn
	h serverHandler
}

func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) {
	if req.TLS == nil {
		req.TLS = &tls.ConnectionState{}
		*req.TLS = h.c.ConnectionState()
	}
	if req.Body == nil {
		req.Body = eofReader
	}
	if req.RemoteAddr == "" {
		req.RemoteAddr = h.c.RemoteAddr().String()
	}
	h.h.ServeHTTP(rw, req)
}

// loggingConn is used for debugging.
type loggingConn struct {
	name string
	net.Conn
}

var (
	uniqNameMu   sync.Mutex
	uniqNameNext = make(map[string]int)
)

func newLoggingConn(baseName string, c net.Conn) net.Conn {
	uniqNameMu.Lock()
	defer uniqNameMu.Unlock()
	uniqNameNext[baseName]++
	return &loggingConn{
		name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]),
		Conn: c,
	}
}

func (c *loggingConn) Write(p []byte) (n int, err error) {
	log.Printf("%s.Write(%d) = ....", c.name, len(p))
	n, err = c.Conn.Write(p)
	log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err)
	return
}

func (c *loggingConn) Read(p []byte) (n int, err error) {
	log.Printf("%s.Read(%d) = ....", c.name, len(p))
	n, err = c.Conn.Read(p)
	log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err)
	return
}

func (c *loggingConn) Close() (err error) {
	log.Printf("%s.Close() = ...", c.name)
	err = c.Conn.Close()
	log.Printf("%s.Close() = %v", c.name, err)
	return
}