// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// HTTP server. See RFC 7230 through 7235.

package http

import (
	"bufio"
	"bytes"
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net"
	"net/textproto"
	"net/url"
	"os"
	"path"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/net/http/httpguts"
)

// Errors used by the HTTP server.
var (
	// ErrBodyNotAllowed is returned by ResponseWriter.Write calls
	// when the HTTP method or response code does not permit a
	// body.
	ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")

	// ErrHijacked is returned by ResponseWriter.Write calls when
	// the underlying connection has been hijacked using the
	// Hijacker interface. A zero-byte write on a hijacked
	// connection will return ErrHijacked without any other side
	// effects.
	ErrHijacked = errors.New("http: connection has been hijacked")

	// ErrContentLength is returned by ResponseWriter.Write calls
	// when a Handler set a Content-Length response header with a
	// declared size and then attempted to write more bytes than
	// declared.
	ErrContentLength = errors.New("http: wrote more than the declared Content-Length")

	// Deprecated: ErrWriteAfterFlush is no longer returned by
	// anything in the net/http package. Callers should not
	// compare errors against this variable.
	ErrWriteAfterFlush = errors.New("unused")
)

// A Handler responds to an HTTP request.
//
// ServeHTTP should write reply headers and data to the ResponseWriter
// and then return. Returning signals that the request is finished; it
// is not valid to use the ResponseWriter or read from the
// Request.Body after or concurrently with the completion of the
// ServeHTTP call.
//
// Depending on the HTTP client software, HTTP protocol version, and
// any intermediaries between the client and the Go server, it may not
// be possible to read from the Request.Body after writing to the
// ResponseWriter. Cautious handlers should read the Request.Body
// first, and then reply.
//
// Except for reading the body, handlers should not modify the
// provided Request.
//
// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
// that the effect of the panic was isolated to the active request.
// It recovers the panic, logs a stack trace to the server error log,
// and either closes the network connection or sends an HTTP/2
// RST_STREAM, depending on the HTTP protocol. To abort a handler so
// the client sees an interrupted response but the server doesn't log
// an error, panic with the value ErrAbortHandler.
type Handler interface {
	ServeHTTP(ResponseWriter, *Request)
}
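
// Usage sketch (illustrative; not part of the original source, and the
// countHandler name and fields are hypothetical): a minimal Handler
// implementation that drains the request body before replying, as the
// comment above recommends.
//
//	type countHandler struct {
//		mu sync.Mutex
//		n  int
//	}
//
//	func (h *countHandler) ServeHTTP(w ResponseWriter, r *Request) {
//		io.Copy(ioutil.Discard, r.Body) // read the body first, then reply
//		h.mu.Lock()
//		h.n++
//		n := h.n
//		h.mu.Unlock()
//		fmt.Fprintf(w, "request count: %d\n", n)
//	}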

// A ResponseWriter interface is used by an HTTP handler to
// construct an HTTP response.
//
// A ResponseWriter may not be used after the Handler.ServeHTTP method
// has returned.
type ResponseWriter interface {
	// Header returns the header map that will be sent by
	// WriteHeader. The Header map also is the mechanism with which
	// Handlers can set HTTP trailers.
	//
	// Changing the header map after a call to WriteHeader (or
	// Write) has no effect unless the modified headers are
	// trailers.
	//
	// There are two ways to set Trailers. The preferred way is to
	// predeclare in the headers which trailers you will later
	// send by setting the "Trailer" header to the names of the
	// trailer keys which will come later. In this case, those
	// keys of the Header map are treated as if they were
	// trailers. See the example. The second way, for trailer
	// keys not known to the Handler until after the first Write,
	// is to prefix the Header map keys with the TrailerPrefix
	// constant value. See TrailerPrefix.
	//
	// To suppress automatic response headers (such as "Date"), set
	// their value to nil.
	Header() Header

	// Write writes the data to the connection as part of an HTTP reply.
	//
	// If WriteHeader has not yet been called, Write calls
	// WriteHeader(http.StatusOK) before writing the data. If the Header
	// does not contain a Content-Type line, Write adds a Content-Type set
	// to the result of passing the initial 512 bytes of written data to
	// DetectContentType. Additionally, if the total size of all written
	// data is under a few KB and there are no Flush calls, the
	// Content-Length header is added automatically.
	//
	// Depending on the HTTP protocol version and the client, calling
	// Write or WriteHeader may prevent future reads on the
	// Request.Body. For HTTP/1.x requests, handlers should read any
	// needed request body data before writing the response. Once the
	// headers have been flushed (due to either an explicit Flusher.Flush
	// call or writing enough data to trigger a flush), the request body
	// may be unavailable. For HTTP/2 requests, the Go HTTP server permits
	// handlers to continue to read the request body while concurrently
	// writing the response. However, such behavior may not be supported
	// by all HTTP/2 clients. Handlers should read before writing if
	// possible to maximize compatibility.
	Write([]byte) (int, error)

	// WriteHeader sends an HTTP response header with the provided
	// status code.
	//
	// If WriteHeader is not called explicitly, the first call to Write
	// will trigger an implicit WriteHeader(http.StatusOK).
	// Thus explicit calls to WriteHeader are mainly used to
	// send error codes.
	//
	// The provided code must be a valid HTTP 1xx-5xx status code.
	// Only one header may be written. Go does not currently
	// support sending user-defined 1xx informational headers,
	// with the exception of 100-continue response header that the
	// Server sends automatically when the Request.Body is read.
	WriteHeader(statusCode int)
}
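
// Usage sketch (illustrative; not from the original source): headers must be
// set before the first WriteHeader or Write call, since later changes to the
// map are ignored unless they are trailers.
//
//	func handle(w ResponseWriter, r *Request) {
//		w.Header().Set("Content-Type", "application/json")
//		w.WriteHeader(StatusCreated)
//		w.Write([]byte(`{"ok":true}`))
//	}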

// The Flusher interface is implemented by ResponseWriters that allow
// an HTTP handler to flush buffered data to the client.
//
// The default HTTP/1.x and HTTP/2 ResponseWriter implementations
// support Flusher, but ResponseWriter wrappers may not. Handlers
// should always test for this ability at runtime.
//
// Note that even for ResponseWriters that support Flush,
// if the client is connected through an HTTP proxy,
// the buffered data may not reach the client until the response
// completes.
type Flusher interface {
	// Flush sends any buffered data to the client.
	Flush()
}
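
// Usage sketch (illustrative): test for Flusher at runtime, since wrapped
// ResponseWriters may not implement it.
//
//	if f, ok := w.(Flusher); ok {
//		f.Flush() // push buffered data to the client now
//	}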

// The Hijacker interface is implemented by ResponseWriters that allow
// an HTTP handler to take over the connection.
//
// The default ResponseWriter for HTTP/1.x connections supports
// Hijacker, but HTTP/2 connections intentionally do not.
// ResponseWriter wrappers may also not support Hijacker. Handlers
// should always test for this ability at runtime.
type Hijacker interface {
	// Hijack lets the caller take over the connection.
	// After a call to Hijack the HTTP server library
	// will not do anything else with the connection.
	//
	// It becomes the caller's responsibility to manage
	// and close the connection.
	//
	// The returned net.Conn may have read or write deadlines
	// already set, depending on the configuration of the
	// Server. It is the caller's responsibility to set
	// or clear those deadlines as needed.
	//
	// The returned bufio.Reader may contain unprocessed buffered
	// data from the client.
	//
	// After a call to Hijack, the original Request.Body must not
	// be used. The original Request's Context remains valid and
	// is not canceled until the Request's ServeHTTP method
	// returns.
	Hijack() (net.Conn, *bufio.ReadWriter, error)
}
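
// Usage sketch (illustrative; not from the original source): take over the
// connection and close it when done, since the server stops managing it
// after Hijack.
//
//	hj, ok := w.(Hijacker)
//	if !ok {
//		Error(w, "hijacking not supported", StatusInternalServerError)
//		return
//	}
//	conn, bufrw, err := hj.Hijack()
//	if err != nil {
//		Error(w, err.Error(), StatusInternalServerError)
//		return
//	}
//	defer conn.Close()
//	bufrw.WriteString("HTTP/1.1 101 Switching Protocols\r\n\r\n")
//	bufrw.Flush()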

// The CloseNotifier interface is implemented by ResponseWriters which
// allow detecting when the underlying connection has gone away.
//
// This mechanism can be used to cancel long operations on the server
// if the client has disconnected before the response is ready.
//
// Deprecated: the CloseNotifier interface predates Go's context package.
// New code should use Request.Context instead.
type CloseNotifier interface {
	// CloseNotify returns a channel that receives at most a
	// single value (true) when the client connection has gone
	// away.
	//
	// CloseNotify may wait to notify until Request.Body has been
	// fully read.
	//
	// After the Handler has returned, there is no guarantee
	// that the channel receives a value.
	//
	// If the protocol is HTTP/1.1 and CloseNotify is called while
	// processing an idempotent request (such as a GET) while
	// HTTP/1.1 pipelining is in use, the arrival of a subsequent
	// pipelined request may cause a value to be sent on the
	// returned channel. In practice HTTP/1.1 pipelining is not
	// enabled in browsers and not seen often in the wild. If this
	// is a problem, use HTTP/2 or only use CloseNotify on methods
	// such as POST.
	CloseNotify() <-chan bool
}
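
// Usage sketch (illustrative): new code should watch Request.Context rather
// than CloseNotify to detect a vanished client.
//
//	func slow(w ResponseWriter, r *Request) {
//		select {
//		case <-time.After(10 * time.Second):
//			io.WriteString(w, "done\n")
//		case <-r.Context().Done():
//			// client went away; abandon the work
//		}
//	}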

var (
	// ServerContextKey is a context key. It can be used in HTTP
	// handlers with Context.Value to access the server that
	// started the handler. The associated value will be of
	// type *Server.
	ServerContextKey = &contextKey{"http-server"}

	// LocalAddrContextKey is a context key. It can be used in
	// HTTP handlers with Context.Value to access the local
	// address the connection arrived on.
	// The associated value will be of type net.Addr.
	LocalAddrContextKey = &contextKey{"local-addr"}
)
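
// Usage sketch (illustrative; the handler name is hypothetical): a Handler
// can recover the owning *Server and the local address from the request's
// context.
//
//	func handler(w ResponseWriter, r *Request) {
//		srv, _ := r.Context().Value(ServerContextKey).(*Server)
//		addr, _ := r.Context().Value(LocalAddrContextKey).(net.Addr)
//		fmt.Fprintf(w, "served by %p on %v\n", srv, addr)
//	}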

// A conn represents the server side of an HTTP connection.
type conn struct {
	// server is the server on which the connection arrived.
	// Immutable; never nil.
	server *Server

	// cancelCtx cancels the connection-level context.
	cancelCtx context.CancelFunc

	// rwc is the underlying network connection.
	// This is never wrapped by other types and is the value given out
	// to CloseNotifier callers. It is usually of type *net.TCPConn or
	// *tls.Conn.
	rwc net.Conn

	// remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously
	// inside the Listener's Accept goroutine, as some implementations block.
	// It is populated immediately inside the (*conn).serve goroutine.
	// This is the value of a Handler's (*Request).RemoteAddr.
	remoteAddr string

	// tlsState is the TLS connection state when using TLS.
	// nil means not TLS.
	tlsState *tls.ConnectionState

	// werr is set to the first write error to rwc.
	// It is set via checkConnErrorWriter{w}, where bufw writes.
	werr error

	// r is bufr's read source. It's a wrapper around rwc that provides
	// io.LimitedReader-style limiting (while reading request headers)
	// and functionality to support CloseNotifier. See *connReader docs.
	r *connReader

	// bufr reads from r.
	bufr *bufio.Reader

	// bufw writes to checkConnErrorWriter{c}, which populates werr on error.
	bufw *bufio.Writer

	// lastMethod is the method of the most recent request
	// on this connection, if any.
	lastMethod string

	curReq atomic.Value // of *response (which has a Request in it)

	curState struct{ atomic uint64 } // packed (unixtime<<8|uint8(ConnState))

	// mu guards hijackedv
	mu sync.Mutex

	// hijackedv is whether this connection has been hijacked
	// by a Handler with the Hijacker interface.
	// It is guarded by mu.
	hijackedv bool
}

func (c *conn) hijacked() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.hijackedv
}

// c.mu must be held.
func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
	if c.hijackedv {
		return nil, nil, ErrHijacked
	}
	c.r.abortPendingRead()

	c.hijackedv = true
	rwc = c.rwc
	rwc.SetDeadline(time.Time{})

	buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc))
	if c.r.hasByte {
		if _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil {
			return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err)
		}
	}
	c.setState(rwc, StateHijacked)
	return
}

// This should be >= 512 bytes for DetectContentType,
// but otherwise it's somewhat arbitrary.
const bufferBeforeChunkingSize = 2048

// chunkWriter writes to a response's conn buffer, and is the writer
// wrapped by the response.bufw buffered writer.
//
// chunkWriter also is responsible for finalizing the Header, including
// conditionally setting the Content-Type and setting a Content-Length
// in cases where the handler's final output is smaller than the buffer
// size. It also conditionally adds chunk headers, when in chunking mode.
//
// See the comment above (*response).Write for the entire write flow.
type chunkWriter struct {
	res *response

	// header is either nil or a deep clone of res.handlerHeader
	// at the time of res.writeHeader, if res.writeHeader is
	// called and extra buffering is being done to calculate
	// Content-Type and/or Content-Length.
	header Header

	// wroteHeader tells whether the header's been written to "the
	// wire" (or rather: w.conn.buf). this is unlike
	// (*response).wroteHeader, which tells only whether it was
	// logically written.
	wroteHeader bool

	// set by the writeHeader method:
	chunking bool // using chunked transfer encoding for reply body
}

var (
	crlf       = []byte("\r\n")
	colonSpace = []byte(": ")
)

func (cw *chunkWriter) Write(p []byte) (n int, err error) {
	if !cw.wroteHeader {
		cw.writeHeader(p)
	}
	if cw.res.req.Method == "HEAD" {
		// Eat writes.
		return len(p), nil
	}
	if cw.chunking {
		_, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p))
		if err != nil {
			cw.res.conn.rwc.Close()
			return
		}
	}
	n, err = cw.res.conn.bufw.Write(p)
	if cw.chunking && err == nil {
		_, err = cw.res.conn.bufw.Write(crlf)
	}
	if err != nil {
		cw.res.conn.rwc.Close()
	}
	return
}

func (cw *chunkWriter) flush() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	cw.res.conn.bufw.Flush()
}

func (cw *chunkWriter) close() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	if cw.chunking {
		bw := cw.res.conn.bufw // conn's bufio writer
		// zero chunk to mark EOF
		bw.WriteString("0\r\n")
		if trailers := cw.res.finalTrailers(); trailers != nil {
			trailers.Write(bw) // the writer handles noting errors
		}
		// final blank line after the trailers (whether
		// present or not)
		bw.WriteString("\r\n")
	}
}

// A response represents the server side of an HTTP response.
type response struct {
	conn             *conn
	req              *Request // request for this response
	reqBody          io.ReadCloser
	cancelCtx        context.CancelFunc // when ServeHTTP exits
	wroteHeader      bool               // reply header has been (logically) written
	wroteContinue    bool               // 100 Continue response was written
	wants10KeepAlive bool               // HTTP/1.0 w/ Connection "keep-alive"
	wantsClose       bool               // HTTP request has Connection "close"

	w  *bufio.Writer // buffers output in chunks to chunkWriter
	cw chunkWriter

	// handlerHeader is the Header that Handlers get access to,
	// which may be retained and mutated even after WriteHeader.
	// handlerHeader is copied into cw.header at WriteHeader
	// time, and privately mutated thereafter.
	handlerHeader Header
	calledHeader  bool // handler accessed handlerHeader via Header

	written       int64 // number of bytes written in body
	contentLength int64 // explicitly-declared Content-Length; or -1
	status        int   // status code passed to WriteHeader

	// close connection after this reply.  set on request and
	// updated after response from handler if there's a
	// "Connection: keep-alive" response header and a
	// Content-Length.
	closeAfterReply bool

	// requestBodyLimitHit is set by requestTooLarge when
	// maxBytesReader hits its max size. It is checked in
	// WriteHeader, to make sure we don't consume the
	// remaining request body to try to advance to the next HTTP
	// request. Instead, when this is set, we stop reading
	// subsequent requests on this connection and stop reading
	// input from it.
	requestBodyLimitHit bool

	// trailers are the headers to be sent after the handler
	// finishes writing the body. This field is initialized from
	// the Trailer response header when the response header is
	// written.
	trailers []string

	handlerDone atomicBool // set true when the handler exits

	// Buffers for Date, Content-Length, and status code
	dateBuf   [len(TimeFormat)]byte
	clenBuf   [10]byte
	statusBuf [3]byte

	// closeNotifyCh is the channel returned by CloseNotify.
	// TODO(bradfitz): this is currently (for Go 1.8) always
	// non-nil. Make this lazily-created again as it used to be?
	closeNotifyCh  chan bool
	didCloseNotify int32 // atomic (only 0->1 winner should send)
}

// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
// that, if present, signals that the map entry is actually for
// the response trailers, and not the response headers. The prefix
// is stripped after the ServeHTTP call finishes and the values are
// sent in the trailers.
//
// This mechanism is intended only for trailers that are not known
// prior to the headers being written. If the set of trailers is fixed
// or known before the header is written, the normal Go trailers mechanism
// is preferred:
//    https://golang.org/pkg/net/http/#ResponseWriter
//    https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
const TrailerPrefix = "Trailer:"
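
// Usage sketch (illustrative; w, body and sum are hypothetical): a trailer
// whose value is only known after the first Write can be set through the
// prefixed key; the prefix is stripped before the trailer is sent.
//
//	io.Copy(w, body)                                     // stream the response body
//	w.Header().Set(TrailerPrefix+"X-Body-Checksum", sum) // sent as a trailer, not a header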

// finalTrailers is called after the Handler exits and returns a non-nil
// value if the Handler set any trailers.
func (w *response) finalTrailers() Header {
	var t Header
	for k, vv := range w.handlerHeader {
		if strings.HasPrefix(k, TrailerPrefix) {
			if t == nil {
				t = make(Header)
			}
			t[strings.TrimPrefix(k, TrailerPrefix)] = vv
		}
	}
	for _, k := range w.trailers {
		if t == nil {
			t = make(Header)
		}
		for _, v := range w.handlerHeader[k] {
			t.Add(k, v)
		}
	}
	return t
}

type atomicBool int32

func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) setTrue()    { atomic.StoreInt32((*int32)(b), 1) }

// declareTrailer is called for each Trailer header when the
// response header is written. It notes that a header will need to be
// written in the trailers at the end of the response.
func (w *response) declareTrailer(k string) {
	k = CanonicalHeaderKey(k)
	if !httpguts.ValidTrailerHeader(k) {
		// Forbidden by RFC 7230, section 4.1.2
		return
	}
	w.trailers = append(w.trailers, k)
}

// requestTooLarge is called by maxBytesReader when too much input has
// been read from the client.
func (w *response) requestTooLarge() {
	w.closeAfterReply = true
	w.requestBodyLimitHit = true
	if !w.wroteHeader {
		w.Header().Set("Connection", "close")
	}
}

// needsSniff reports whether a Content-Type still needs to be sniffed.
func (w *response) needsSniff() bool {
	_, haveType := w.handlerHeader["Content-Type"]
	return !w.cw.wroteHeader && !haveType && w.written < sniffLen
}

// writerOnly hides an io.Writer value's optional ReadFrom method
// from io.Copy.
type writerOnly struct {
	io.Writer
}

func srcIsRegularFile(src io.Reader) (isRegular bool, err error) {
	switch v := src.(type) {
	case *os.File:
		fi, err := v.Stat()
		if err != nil {
			return false, err
		}
		return fi.Mode().IsRegular(), nil
	case *io.LimitedReader:
		return srcIsRegularFile(v.R)
	default:
		return
	}
}

// ReadFrom is here to optimize copying from an *os.File regular file
// to a *net.TCPConn with sendfile.
func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
	// Our underlying w.conn.rwc is usually a *TCPConn (with its
	// own ReadFrom method). If not, or if our src isn't a regular
	// file, just fall back to the normal copy method.
	rf, ok := w.conn.rwc.(io.ReaderFrom)
	regFile, err := srcIsRegularFile(src)
	if err != nil {
		return 0, err
	}
	if !ok || !regFile {
		bufp := copyBufPool.Get().(*[]byte)
		defer copyBufPool.Put(bufp)
		return io.CopyBuffer(writerOnly{w}, src, *bufp)
	}

	// sendfile path:

	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}

	if w.needsSniff() {
		n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen))
		n += n0
		if err != nil {
			return n, err
		}
	}

	w.w.Flush()  // get rid of any previous writes
	w.cw.flush() // make sure Header is written; flush data to rwc

	// Now that cw has been flushed, its chunking field is guaranteed initialized.
	if !w.cw.chunking && w.bodyAllowed() {
		n0, err := rf.ReadFrom(src)
		n += n0
		w.written += n0
		return n, err
	}

	n0, err := io.Copy(writerOnly{w}, src)
	n += n0
	return n, err
}

// debugServerConnections controls whether all server connections are wrapped
// with a verbose logging wrapper.
const debugServerConnections = false

// Create new connection from rwc.
func (srv *Server) newConn(rwc net.Conn) *conn {
	c := &conn{
		server: srv,
		rwc:    rwc,
	}
	if debugServerConnections {
		c.rwc = newLoggingConn("server", c.rwc)
	}
	return c
}

type readResult struct {
	n   int
	err error
	b   byte // byte read, if n == 1
}

// connReader is the io.Reader wrapper used by *conn. It combines a
// selectively-activated io.LimitedReader (to bound request header
// read sizes) with support for selectively keeping an io.Reader.Read
// call blocked in a background goroutine to wait for activity and
// trigger a CloseNotifier channel.
type connReader struct {
	conn *conn

	mu      sync.Mutex // guards following
	hasByte bool
	byteBuf [1]byte
	cond    *sync.Cond
	inRead  bool
	aborted bool  // set true before conn.rwc deadline is set to past
	remain  int64 // bytes remaining
}

func (cr *connReader) lock() {
	cr.mu.Lock()
	if cr.cond == nil {
		cr.cond = sync.NewCond(&cr.mu)
	}
}

func (cr *connReader) unlock() { cr.mu.Unlock() }

func (cr *connReader) startBackgroundRead() {
	cr.lock()
	defer cr.unlock()
	if cr.inRead {
		panic("invalid concurrent Body.Read call")
	}
	if cr.hasByte {
		return
	}
	cr.inRead = true
	cr.conn.rwc.SetReadDeadline(time.Time{})
	go cr.backgroundRead()
}

func (cr *connReader) backgroundRead() {
	n, err := cr.conn.rwc.Read(cr.byteBuf[:])
	cr.lock()
	if n == 1 {
		cr.hasByte = true
		// We were past the end of the previous request's body already
		// (since we wouldn't be in a background read otherwise), so
		// this is a pipelined HTTP request. Prior to Go 1.11 we used to
		// send on the CloseNotify channel and cancel the context here,
		// but the behavior was documented as only "may", and we only
		// did that because that's how CloseNotify accidentally behaved
		// in very early Go releases prior to context support. Once we
		// added context support, people used a Handler's
		// Request.Context() and passed it along. Having that context
		// cancel on pipelined HTTP requests caused problems.
		// Fortunately, almost nothing uses HTTP/1.x pipelining.
		// Unfortunately, apt-get does, or sometimes does.
		// New Go 1.11 behavior: don't fire CloseNotify or cancel
		// contexts on pipelined requests. Shouldn't affect people, but
		// fixes cases like Issue 23921. This does mean that a client
		// closing their TCP connection after sending a pipelined
		// request won't cancel the context, but we'll catch that on any
		// write failure (in checkConnErrorWriter.Write).
		// If the server never writes, yes, there are still contrived
		// server & client behaviors where this fails to ever cancel the
		// context, but that's kinda why HTTP/1.x pipelining died
		// anyway.
	}
	if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() {
		// Ignore this error. It's the expected error from
		// another goroutine calling abortPendingRead.
	} else if err != nil {
		cr.handleReadError(err)
	}
	cr.aborted = false
	cr.inRead = false
	cr.unlock()
	cr.cond.Broadcast()
}

func (cr *connReader) abortPendingRead() {
	cr.lock()
	defer cr.unlock()
	if !cr.inRead {
		return
	}
	cr.aborted = true
	cr.conn.rwc.SetReadDeadline(aLongTimeAgo)
	for cr.inRead {
		cr.cond.Wait()
	}
	cr.conn.rwc.SetReadDeadline(time.Time{})
}

func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain }
func (cr *connReader) setInfiniteReadLimit()     { cr.remain = maxInt64 }
func (cr *connReader) hitReadLimit() bool        { return cr.remain <= 0 }

// handleReadError is called whenever a Read from the client returns a
// non-nil error.
//
// The provided non-nil err is almost always io.EOF or a "use of
// closed network connection". In any case, the error is not
// particularly interesting, except perhaps for debugging during
// development. Any error means the connection is dead and we should
// down its context.
//
// It may be called from multiple goroutines.
func (cr *connReader) handleReadError(_ error) {
	cr.conn.cancelCtx()
	cr.closeNotify()
}

// may be called from multiple goroutines.
func (cr *connReader) closeNotify() {
	res, _ := cr.conn.curReq.Load().(*response)
	if res != nil && atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) {
		res.closeNotifyCh <- true
	}
}

func (cr *connReader) Read(p []byte) (n int, err error) {
	cr.lock()
	if cr.inRead {
		cr.unlock()
		if cr.conn.hijacked() {
			panic("invalid Body.Read call. After hijacked, the original Request must not be used")
		}
		panic("invalid concurrent Body.Read call")
	}
	if cr.hitReadLimit() {
		cr.unlock()
		return 0, io.EOF
	}
	if len(p) == 0 {
		cr.unlock()
		return 0, nil
	}
	if int64(len(p)) > cr.remain {
		p = p[:cr.remain]
	}
	if cr.hasByte {
		p[0] = cr.byteBuf[0]
		cr.hasByte = false
		cr.unlock()
		return 1, nil
	}
	cr.inRead = true
	cr.unlock()
	n, err = cr.conn.rwc.Read(p)

	cr.lock()
	cr.inRead = false
	if err != nil {
		cr.handleReadError(err)
	}
	cr.remain -= int64(n)
	cr.unlock()

	cr.cond.Broadcast()
	return n, err
}

var (
	bufioReaderPool   sync.Pool
	bufioWriter2kPool sync.Pool
	bufioWriter4kPool sync.Pool
)

var copyBufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 32*1024)
		return &b
	},
}

func bufioWriterPool(size int) *sync.Pool {
	switch size {
	case 2 << 10:
		return &bufioWriter2kPool
	case 4 << 10:
		return &bufioWriter4kPool
	}
	return nil
}

func newBufioReader(r io.Reader) *bufio.Reader {
	if v := bufioReaderPool.Get(); v != nil {
		br := v.(*bufio.Reader)
		br.Reset(r)
		return br
	}
	// Note: if this reader size is ever changed, update
	// TestHandlerBodyClose's assumptions.
	return bufio.NewReader(r)
}

func putBufioReader(br *bufio.Reader) {
	br.Reset(nil)
	bufioReaderPool.Put(br)
}

func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
	pool := bufioWriterPool(size)
	if pool != nil {
		if v := pool.Get(); v != nil {
			bw := v.(*bufio.Writer)
			bw.Reset(w)
			return bw
		}
	}
	return bufio.NewWriterSize(w, size)
}

func putBufioWriter(bw *bufio.Writer) {
	bw.Reset(nil)
	if pool := bufioWriterPool(bw.Available()); pool != nil {
		pool.Put(bw)
	}
}

// DefaultMaxHeaderBytes is the maximum permitted size of the headers
// in an HTTP request.
// This can be overridden by setting Server.MaxHeaderBytes.
const DefaultMaxHeaderBytes = 1 << 20 // 1 MB

func (srv *Server) maxHeaderBytes() int {
	if srv.MaxHeaderBytes > 0 {
		return srv.MaxHeaderBytes
	}
	return DefaultMaxHeaderBytes
}
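
// Configuration sketch (illustrative): callers change the limit through the
// Server field; the zero value means DefaultMaxHeaderBytes is used.
//
//	srv := &Server{
//		Addr:           ":8080",
//		MaxHeaderBytes: 1 << 16, // allow 64 KB of request headers
//	}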

func (srv *Server) initialReadLimitSize() int64 {
	return int64(srv.maxHeaderBytes()) + 4096 // bufio slop
}

// wrapper around io.ReadCloser which, on first read, sends an
// HTTP/1.1 100 Continue header.
type expectContinueReader struct {
	resp       *response
	readCloser io.ReadCloser
	closed     bool
	sawEOF     bool
}

func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
	if ecr.closed {
		return 0, ErrBodyReadAfterClose
	}
	if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() {
		ecr.resp.wroteContinue = true
		ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
		ecr.resp.conn.bufw.Flush()
	}
	n, err = ecr.readCloser.Read(p)
	if err == io.EOF {
		ecr.sawEOF = true
	}
	return
}

func (ecr *expectContinueReader) Close() error {
	ecr.closed = true
	return ecr.readCloser.Close()
}

// TimeFormat is the time format to use when generating times in HTTP
// headers. It is like time.RFC1123 but hard-codes GMT as the time
// zone. The time being formatted must be in UTC for Format to
// generate the correct format.
//
// For parsing this time format, see ParseTime.
const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
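
// Usage sketch (illustrative; modTime is hypothetical): the time must be
// converted to UTC before formatting with TimeFormat.
//
//	w.Header().Set("Last-Modified", modTime.UTC().Format(TimeFormat))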

// appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat))
func appendTime(b []byte, t time.Time) []byte {
	const days = "SunMonTueWedThuFriSat"
	const months = "JanFebMarAprMayJunJulAugSepOctNovDec"

	t = t.UTC()
	yy, mm, dd := t.Date()
	hh, mn, ss := t.Clock()
	day := days[3*t.Weekday():]
	mon := months[3*(mm-1):]

	return append(b,
		day[0], day[1], day[2], ',', ' ',
		byte('0'+dd/10), byte('0'+dd%10), ' ',
		mon[0], mon[1], mon[2], ' ',
		byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ',
		byte('0'+hh/10), byte('0'+hh%10), ':',
		byte('0'+mn/10), byte('0'+mn%10), ':',
		byte('0'+ss/10), byte('0'+ss%10), ' ',
		'G', 'M', 'T')
}

var errTooLarge = errors.New("http: request too large")

// Read next request from connection.
func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
	if c.hijacked() {
		return nil, ErrHijacked
	}

	var (
		wholeReqDeadline time.Time // or zero if none
		hdrDeadline      time.Time // or zero if none
	)
	t0 := time.Now()
	if d := c.server.readHeaderTimeout(); d != 0 {
		hdrDeadline = t0.Add(d)
	}
	if d := c.server.ReadTimeout; d != 0 {
		wholeReqDeadline = t0.Add(d)
	}
	c.rwc.SetReadDeadline(hdrDeadline)
	if d := c.server.WriteTimeout; d != 0 {
		defer func() {
			c.rwc.SetWriteDeadline(time.Now().Add(d))
		}()
	}

	c.r.setReadLimit(c.server.initialReadLimitSize())
	if c.lastMethod == "POST" {
		// RFC 7230 section 3 tolerance for old buggy clients.
		peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
		c.bufr.Discard(numLeadingCRorLF(peek))
	}
	req, err := readRequest(c.bufr, keepHostHeader)
	if err != nil {
		if c.r.hitReadLimit() {
			return nil, errTooLarge
		}
		return nil, err
	}

	if !http1ServerSupportsRequest(req) {
		return nil, badRequestError("unsupported protocol version")
	}

	c.lastMethod = req.Method
	c.r.setInfiniteReadLimit()

	hosts, haveHost := req.Header["Host"]
	isH2Upgrade := req.isH2Upgrade()
	if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" {
		return nil, badRequestError("missing required Host header")
	}
	if len(hosts) > 1 {
		return nil, badRequestError("too many Host headers")
	}
	if len(hosts) == 1 && !httpguts.ValidHostHeader(hosts[0]) {
		return nil, badRequestError("malformed Host header")
	}
	for k, vv := range req.Header {
		if !httpguts.ValidHeaderFieldName(k) {
			return nil, badRequestError("invalid header name")
		}
		for _, v := range vv {
			if !httpguts.ValidHeaderFieldValue(v) {
				return nil, badRequestError("invalid header value")
			}
		}
	}
	delete(req.Header, "Host")

	ctx, cancelCtx := context.WithCancel(ctx)
	req.ctx = ctx
	req.RemoteAddr = c.remoteAddr
	req.TLS = c.tlsState
	if body, ok := req.Body.(*body); ok {
		body.doEarlyClose = true
	}

	// Adjust the read deadline if necessary.
	if !hdrDeadline.Equal(wholeReqDeadline) {
		c.rwc.SetReadDeadline(wholeReqDeadline)
	}

	w = &response{
		conn:          c,
		cancelCtx:     cancelCtx,
		req:           req,
		reqBody:       req.Body,
		handlerHeader: make(Header),
		contentLength: -1,
		closeNotifyCh: make(chan bool, 1),

		// We populate these ahead of time so we're not
		// reading from req.Header after their Handler starts
		// and maybe mutates it (Issue 14940)
		wants10KeepAlive: req.wantsHttp10KeepAlive(),
		wantsClose:       req.wantsClose(),
	}
	if isH2Upgrade {
		w.closeAfterReply = true
	}
	w.cw.res = w
	w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
	return w, nil
}

// http1ServerSupportsRequest reports whether Go's HTTP/1.x server
// supports the given request.
func http1ServerSupportsRequest(req *Request) bool {
	if req.ProtoMajor == 1 {
		return true
	}
	// Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can
	// wire up their own HTTP/2 upgrades.
	if req.ProtoMajor == 2 && req.ProtoMinor == 0 &&
		req.Method == "PRI" && req.RequestURI == "*" {
		return true
	}
	// Reject HTTP/0.x, and all other HTTP/2+ requests (which
	// aren't encoded in ASCII anyway).
	return false
}

func (w *response) Header() Header {
	if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
		// Accessing the header between logically writing it
		// and physically writing it means we need to allocate
		// a clone to snapshot the logically written state.
		w.cw.header = w.handlerHeader.Clone()
	}
	w.calledHeader = true
	return w.handlerHeader
}

// maxPostHandlerReadBytes is the max number of Request.Body bytes not
// consumed by a handler that the server will read from the client
// in order to keep a connection alive. If there are more bytes than
// this, the server, to be paranoid, instead sends a "Connection: close"
// response.
//
// This number is approximately what a typical machine's TCP buffer
// size is anyway. (If we have the bytes on the machine, we might as
// well read them.)
const maxPostHandlerReadBytes = 256 << 10

func checkWriteHeaderCode(code int) {
	// Issue 22880: require valid WriteHeader status codes.
	// For now we only enforce that it's three digits.
	// In the future we might block things over 599 (600 and above aren't defined
	// at https://httpwg.org/specs/rfc7231.html#status.codes)
	// and we might block under 200 (once we have more mature 1xx support).
	// But for now any three digits.
	//
	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
	// no equivalent bogus thing we can realistically send in HTTP/2,
	// so we'll consistently panic instead and help people find their bugs
	// early. (We can't return an error from WriteHeader even if we wanted to.)
	if code < 100 || code > 999 {
		panic(fmt.Sprintf("invalid WriteHeader code %v", code))
	}
}

// relevantCaller searches the call stack for the first function outside of net/http.
// The purpose of this function is to provide more helpful error messages.
func relevantCaller() runtime.Frame {
	pc := make([]uintptr, 16)
	n := runtime.Callers(1, pc)
	frames := runtime.CallersFrames(pc[:n])
	prefix1 := "net/http."
	prefix2 := "net/http."
	if runtime.Compiler == "gccgo" {
		prefix2 = "http."
	}
	var frame runtime.Frame
	for {
		frame, more := frames.Next()
		if !strings.HasPrefix(frame.Function, prefix1) && !strings.HasPrefix(frame.Function, prefix2) {
			return frame
		}
		if !more {
			break
		}
	}
	return frame
}

func (w *response) WriteHeader(code int) {
	if w.conn.hijacked() {
		caller := relevantCaller()
		w.conn.server.logf("http: response.WriteHeader on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
		return
	}
	if w.wroteHeader {
		caller := relevantCaller()
		w.conn.server.logf("http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
		return
	}
	checkWriteHeaderCode(code)
	w.wroteHeader = true
	w.status = code

	if w.calledHeader && w.cw.header == nil {
		w.cw.header = w.handlerHeader.Clone()
	}

	if cl := w.handlerHeader.get("Content-Length"); cl != "" {
		v, err := strconv.ParseInt(cl, 10, 64)
		if err == nil && v >= 0 {
			w.contentLength = v
		} else {
			w.conn.server.logf("http: invalid Content-Length of %q", cl)
			w.handlerHeader.Del("Content-Length")
		}
	}
}

// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
// This type is used to avoid extra allocations from cloning and/or populating
// the response Header map and all its 1-element slices.
type extraHeader struct {
	contentType      string
	connection       string
	transferEncoding string
	date             []byte // written if not nil
	contentLength    []byte // written if not nil
}

// Sorted the same as extraHeader.Write's loop.
var extraHeaderKeys = [][]byte{
	[]byte("Content-Type"),
	[]byte("Connection"),
	[]byte("Transfer-Encoding"),
}

var (
	headerContentLength = []byte("Content-Length: ")
	headerDate          = []byte("Date: ")
)

// Write writes the headers described in h to w.
//
// This method has a value receiver, despite the somewhat large size
// of h, because it prevents an allocation. The escape analysis isn't
// smart enough to realize this function doesn't mutate h.
func (h extraHeader) Write(w *bufio.Writer) {
	if h.date != nil {
		w.Write(headerDate)
		w.Write(h.date)
		w.Write(crlf)
	}
	if h.contentLength != nil {
		w.Write(headerContentLength)
		w.Write(h.contentLength)
		w.Write(crlf)
	}
	for i, v := range []string{h.contentType, h.connection, h.transferEncoding} {
		if v != "" {
			w.Write(extraHeaderKeys[i])
			w.Write(colonSpace)
			w.WriteString(v)
			w.Write(crlf)
		}
	}
}

// writeHeader finalizes the header sent to the client and writes it
// to cw.res.conn.bufw.
//
// p is not written by writeHeader, but is the first chunk of the body
// that will be written. It is sniffed for a Content-Type if none is
// set explicitly. It's also used to set the Content-Length, if the
// total body size was small and the handler has already finished
// running.
func (cw *chunkWriter) writeHeader(p []byte) {
	if cw.wroteHeader {
		return
	}
	cw.wroteHeader = true

	w := cw.res
	keepAlivesEnabled := w.conn.server.doKeepAlives()
	isHEAD := w.req.Method == "HEAD"

	// header is written out to w.conn.buf below. Depending on the
	// state of the handler, we either own the map or not. If we
	// don't own it, the exclude map is created lazily for
	// WriteSubset to remove headers. The setHeader struct holds
	// headers we need to add.
	header := cw.header
	owned := header != nil
	if !owned {
		header = w.handlerHeader
	}
	var excludeHeader map[string]bool
	delHeader := func(key string) {
		if owned {
			header.Del(key)
			return
		}
		if _, ok := header[key]; !ok {
			return
		}
		if excludeHeader == nil {
			excludeHeader = make(map[string]bool)
		}
		excludeHeader[key] = true
	}
	var setHeader extraHeader

	// Don't write out the fake "Trailer:foo" keys. See TrailerPrefix.
	trailers := false
	for k := range cw.header {
		if strings.HasPrefix(k, TrailerPrefix) {
			if excludeHeader == nil {
				excludeHeader = make(map[string]bool)
			}
			excludeHeader[k] = true
			trailers = true
		}
	}
	for _, v := range cw.header["Trailer"] {
		trailers = true
		foreachHeaderElement(v, cw.res.declareTrailer)
	}

	te := header.get("Transfer-Encoding")
	hasTE := te != ""

	// If the handler is done but never sent a Content-Length
	// response header and this is our first (and last) write, set
	// it, even to zero. This helps HTTP/1.0 clients keep their
	// "keep-alive" connections alive.
	// Exceptions: 304/204/1xx responses never get Content-Length, and if
	// it was a HEAD request, we don't know the difference between
	// 0 actual bytes and 0 bytes because the handler noticed it
	// was a HEAD request and chose not to write anything. So for
	// HEAD, the handler should either write the Content-Length or
	// write non-zero bytes. If it's actually 0 bytes and the
	// handler never looked at the Request.Method, we just don't
	// send a Content-Length header.
	// Further, we don't send an automatic Content-Length if they
	// set a Transfer-Encoding, because they're generally incompatible.
	if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
		w.contentLength = int64(len(p))
		setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
	}

	// If this was an HTTP/1.0 request with keep-alive and we sent a
	// Content-Length back, we can make this a keep-alive response ...
	if w.wants10KeepAlive && keepAlivesEnabled {
		sentLength := header.get("Content-Length") != ""
		if sentLength && header.get("Connection") == "keep-alive" {
			w.closeAfterReply = false
		}
	}

	// Check for an explicit (and valid) Content-Length header.
	hasCL := w.contentLength != -1

	if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) {
		_, connectionHeaderSet := header["Connection"]
		if !connectionHeaderSet {
			setHeader.connection = "keep-alive"
		}
	} else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose {
		w.closeAfterReply = true
	}

	if header.get("Connection") == "close" || !keepAlivesEnabled {
		w.closeAfterReply = true
	}

	// If the client wanted a 100-continue but we never sent it to
	// them (or, more strictly: we never finished reading their
	// request body), don't reuse this connection because it's now
	// in an unknown state: we might be sending this response at
	// the same time the client is now sending its request body
	// after a timeout.  (Some HTTP clients send Expect:
	// 100-continue but knowing that some servers don't support
	// it, the clients set a timer and send the body later anyway)
	// If we haven't seen EOF, we can't skip over the unread body
	// because we don't know if the next bytes on the wire will be
	// the body-following-the-timer or the subsequent request.
	// See Issue 11549.
	if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF {
		w.closeAfterReply = true
	}

	// Per RFC 2616, we should consume the request body before
	// replying, if the handler hasn't already done so. But we
	// don't want to do an unbounded amount of reading here for
	// DoS reasons, so we only try up to a threshold.
	// TODO(bradfitz): where does RFC 2616 say that? See Issue 15527
	// about HTTP/1.x Handlers concurrently reading and writing, like
	// HTTP/2 handlers can do. Maybe this code should be relaxed?
	if w.req.ContentLength != 0 && !w.closeAfterReply {
		var discard, tooBig bool

		switch bdy := w.req.Body.(type) {
		case *expectContinueReader:
			if bdy.resp.wroteContinue {
				discard = true
			}
		case *body:
			bdy.mu.Lock()
			switch {
			case bdy.closed:
				if !bdy.sawEOF {
					// Body was closed in handler with non-EOF error.
					w.closeAfterReply = true
				}
			case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
				tooBig = true
			default:
				discard = true
			}
			bdy.mu.Unlock()
		default:
			discard = true
		}

		if discard {
			_, err := io.CopyN(ioutil.Discard, w.reqBody, maxPostHandlerReadBytes+1)
			switch err {
			case nil:
				// There must be even more data left over.
				tooBig = true
			case ErrBodyReadAfterClose:
				// Body was already consumed and closed.
			case io.EOF:
				// The remaining body was just consumed, close it.
				err = w.reqBody.Close()
				if err != nil {
					w.closeAfterReply = true
				}
			default:
				// Some other kind of error occurred, like a read timeout, or
				// corrupt chunked encoding. In any case, whatever remains
				// on the wire must not be parsed as another HTTP request.
				w.closeAfterReply = true
			}
		}

		if tooBig {
			w.requestTooLarge()
			delHeader("Connection")
			setHeader.connection = "close"
		}
	}

	code := w.status
	if bodyAllowedForStatus(code) {
		// If no content type, apply sniffing algorithm to body.
		_, haveType := header["Content-Type"]
		if !haveType && !hasTE && len(p) > 0 {
			setHeader.contentType = DetectContentType(p)
		}
	} else {
		for _, k := range suppressedHeaders(code) {
			delHeader(k)
		}
	}

	if !header.has("Date") {
		setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
	}

	if hasCL && hasTE && te != "identity" {
		// TODO: return an error if WriteHeader gets a return parameter
		// For now just ignore the Content-Length.
		w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
			te, w.contentLength)
		delHeader("Content-Length")
		hasCL = false
	}

	if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) {
		// do nothing
	} else if code == StatusNoContent {
		delHeader("Transfer-Encoding")
	} else if hasCL {
		delHeader("Transfer-Encoding")
	} else if w.req.ProtoAtLeast(1, 1) {
		// HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no
		// content-length has been provided. The connection must be closed after the
		// reply is written, and no chunking is to be done. This is the setup
		// recommended in the Server-Sent Events candidate recommendation 11,
		// section 8.
		if hasTE && te == "identity" {
			cw.chunking = false
			w.closeAfterReply = true
		} else {
			// HTTP/1.1 or greater: use chunked transfer encoding
			// to avoid closing the connection at EOF.
			cw.chunking = true
			setHeader.transferEncoding = "chunked"
			if hasTE && te == "chunked" {
				// We will send the chunked Transfer-Encoding header later.
				delHeader("Transfer-Encoding")
			}
		}
	} else {
		// HTTP version < 1.1: cannot do chunked transfer
		// encoding and we don't know the Content-Length so
		// signal EOF by closing connection.
		w.closeAfterReply = true
		delHeader("Transfer-Encoding") // in case already set
	}

	// Cannot use Content-Length with non-identity Transfer-Encoding.
	if cw.chunking {
		delHeader("Content-Length")
	}
	if !w.req.ProtoAtLeast(1, 0) {
		return
	}

	if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) {
		delHeader("Connection")
		if w.req.ProtoAtLeast(1, 1) {
			setHeader.connection = "close"
		}
	}

	writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
	cw.header.WriteSubset(w.conn.bufw, excludeHeader)
	setHeader.Write(w.conn.bufw)
	w.conn.bufw.Write(crlf)
}

// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 7230 section 7 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) {
	v = textproto.TrimString(v)
	if v == "" {
		return
	}
	if !strings.Contains(v, ",") {
		fn(v)
		return
	}
	for _, f := range strings.Split(v, ",") {
		if f = textproto.TrimString(f); f != "" {
			fn(f)
		}
	}
}

// writeStatusLine writes an HTTP/1.x Status-Line (RFC 7230 Section 3.1.2)
// to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0.
// code is the response status code.
// scratch is an optional scratch buffer. If it has at least capacity 3, it's used.
func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) {
	if is11 {
		bw.WriteString("HTTP/1.1 ")
	} else {
		bw.WriteString("HTTP/1.0 ")
	}
	if text, ok := statusText[code]; ok {
		bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10))
		bw.WriteByte(' ')
		bw.WriteString(text)
		bw.WriteString("\r\n")
	} else {
		// don't worry about performance
		fmt.Fprintf(bw, "%03d status code %d\r\n", code, code)
	}
}

// bodyAllowed reports whether a Write is allowed for this response type.
// It's illegal to call this before the header has been flushed.
func (w *response) bodyAllowed() bool {
	if !w.wroteHeader {
		panic("")
	}
	return bodyAllowedForStatus(w.status)
}

// The Life Of A Write is like this:
//
// Handler starts. No header has been sent. The handler can either
// write a header, or just start writing. Writing before sending a header
// sends an implicitly empty 200 OK header.
//
// If the handler didn't declare a Content-Length up front, we either
// go into chunking mode or, if the handler finishes running before
// the chunking buffer size, we compute a Content-Length and send that
// in the header instead.
//
// Likewise, if the handler didn't set a Content-Type, we sniff that
// from the initial chunk of output.
//
// The Writers are wired together like:
//
// 1. *response (the ResponseWriter) ->
// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes
// 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type)
//    and which writes the chunk headers, if needed.
// 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to ->
// 5. checkConnErrorWriter{c}, which notes any non-nil error on Write
//    and populates c.werr with it if so, but otherwise writes to:
// 6. the rwc, the net.Conn.
//
// TODO(bradfitz): short-circuit some of the buffering when the
// initial header contains both a Content-Type and Content-Length.
// Also short-circuit in (1) when the header's been sent and not in
// chunking mode, writing directly to (4) instead, if (2) has no
// buffered data. More generally, we could short-circuit from (1) to
// (3) even in chunking mode if the write size from (1) is over some
// threshold and nothing is in (2).  The answer might be mostly making
// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal
// with this instead.
func (w *response) Write(data []byte) (n int, err error) {
	return w.write(len(data), data, "")
}

func (w *response) WriteString(data string) (n int, err error) {
	return w.write(len(data), nil, data)
}

// either dataB or dataS is non-zero.
func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) {
	if w.conn.hijacked() {
		if lenData > 0 {
			caller := relevantCaller()
			w.conn.server.logf("http: response.Write on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
		}
		return 0, ErrHijacked
	}
	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}
	if lenData == 0 {
		return 0, nil
	}
	if !w.bodyAllowed() {
		return 0, ErrBodyNotAllowed
	}

	w.written += int64(lenData) // ignoring errors, for errorKludge
	if w.contentLength != -1 && w.written > w.contentLength {
		return 0, ErrContentLength
	}
	if dataB != nil {
		return w.w.Write(dataB)
	} else {
		return w.w.WriteString(dataS)
	}
}

func (w *response) finishRequest() {
	w.handlerDone.setTrue()

	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}

	w.w.Flush()
	putBufioWriter(w.w)
	w.cw.close()
	w.conn.bufw.Flush()

	w.conn.r.abortPendingRead()

	// Close the body (regardless of w.closeAfterReply) so we can
	// re-use its bufio.Reader later safely.
	w.reqBody.Close()

	if w.req.MultipartForm != nil {
		w.req.MultipartForm.RemoveAll()
	}
}

// shouldReuseConnection reports whether the underlying TCP connection can be reused.
// It must only be called after the handler is done executing.
func (w *response) shouldReuseConnection() bool {
	if w.closeAfterReply {
		// The request or something set while executing the
		// handler indicated we shouldn't reuse this
		// connection.
		return false
	}

	if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
		// Did not write enough. Avoid getting out of sync.
		return false
	}

	// There was some error writing to the underlying connection
	// during the request, so don't re-use this conn.
	if w.conn.werr != nil {
		return false
	}

	if w.closedRequestBodyEarly() {
		return false
	}

	return true
}

func (w *response) closedRequestBodyEarly() bool {
	body, ok := w.req.Body.(*body)
	return ok && body.didEarlyClose()
}

func (w *response) Flush() {
	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}
	w.w.Flush()
	w.cw.flush()
}

func (c *conn) finalFlush() {
	if c.bufr != nil {
		// Steal the bufio.Reader (~4KB worth of memory) and its associated
		// reader for a future connection.
		putBufioReader(c.bufr)
		c.bufr = nil
	}

	if c.bufw != nil {
		c.bufw.Flush()
		// Steal the bufio.Writer (~4KB worth of memory) and its associated
		// writer for a future connection.
		putBufioWriter(c.bufw)
		c.bufw = nil
	}
}

// Close the connection.
func (c *conn) close() {
	c.finalFlush()
	c.rwc.Close()
}

// rstAvoidanceDelay is the amount of time we sleep after closing the
// write side of a TCP connection before closing the entire socket.
// By sleeping, we increase the chances that the client sees our FIN
// and processes its final data before it processes the subsequent RST
// from closing a connection with known unread data.
// This RST seems to occur mostly on BSD systems. (And Windows?)
// This timeout is somewhat arbitrary (~latency around the planet).
const rstAvoidanceDelay = 500 * time.Millisecond

type closeWriter interface {
	CloseWrite() error
}

var _ closeWriter = (*net.TCPConn)(nil)

// closeWriteAndWait flushes any outstanding data and sends a FIN packet (if
// client is connected via TCP), signalling that we're done. We then
// pause for a bit, hoping the client processes it before any
// subsequent RST.
//
// See https://golang.org/issue/3595
func (c *conn) closeWriteAndWait() {
	c.finalFlush()
	if tcp, ok := c.rwc.(closeWriter); ok {
		tcp.CloseWrite()
	}
	time.Sleep(rstAvoidanceDelay)
}

// validNPN reports whether the proto is not a blacklisted Next
// Protocol Negotiation protocol. Empty and built-in protocol types
// are blacklisted and can't be overridden with alternate
// implementations.
func validNPN(proto string) bool {
	switch proto {
	case "", "http/1.1", "http/1.0":
		return false
	}
	return true
}

func (c *conn) setState(nc net.Conn, state ConnState) {
	srv := c.server
	switch state {
	case StateNew:
		srv.trackConn(c, true)
	case StateHijacked, StateClosed:
		srv.trackConn(c, false)
	}
	if state > 0xff || state < 0 {
		panic("internal error")
	}
	packedState := uint64(time.Now().Unix()<<8) | uint64(state)
	atomic.StoreUint64(&c.curState.atomic, packedState)
	if hook := srv.ConnState; hook != nil {
		hook(nc, state)
	}
}

func (c *conn) getState() (state ConnState, unixSec int64) {
	packedState := atomic.LoadUint64(&c.curState.atomic)
	return ConnState(packedState & 0xff), int64(packedState >> 8)
}

// badRequestError is a literal string (used by the server in HTML,
// unescaped) to tell the user why their request was bad. It should
// be plain text without user info or other embedded errors.
type badRequestError string

func (e badRequestError) Error() string { return "Bad Request: " + string(e) }

// ErrAbortHandler is a sentinel panic value to abort a handler.
// While any panic from ServeHTTP aborts the response to the client,
// panicking with ErrAbortHandler also suppresses logging of a stack
// trace to the server's error log.
var ErrAbortHandler = errors.New("net/http: abort Handler")
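
// An illustrative sketch (not part of this file) of a handler that aborts
// mid-response without logging a stack trace; it assumes the usual io and
// net/http imports:
//
//	func relay(w http.ResponseWriter, r *http.Request) {
//		if _, err := io.Copy(w, r.Body); err != nil {
//			// The client sees an interrupted response; the server
//			// does not log a panic stack trace.
//			panic(http.ErrAbortHandler)
//		}
//	}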

// isCommonNetReadError reports whether err is a common error
// encountered during reading a request off the network when the
// client has gone away or had its read fail somehow. This is used to
// determine which logs are interesting enough to log about.
func isCommonNetReadError(err error) bool {
	if err == io.EOF {
		return true
	}
	if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
		return true
	}
	if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
		return true
	}
	return false
}

// Serve a new connection.
func (c *conn) serve(ctx context.Context) {
	c.remoteAddr = c.rwc.RemoteAddr().String()
	ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr())
	defer func() {
		if err := recover(); err != nil && err != ErrAbortHandler {
			const size = 64 << 10
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
		}
		if !c.hijacked() {
			c.close()
			c.setState(c.rwc, StateClosed)
		}
	}()

	if tlsConn, ok := c.rwc.(*tls.Conn); ok {
		if d := c.server.ReadTimeout; d != 0 {
			c.rwc.SetReadDeadline(time.Now().Add(d))
		}
		if d := c.server.WriteTimeout; d != 0 {
			c.rwc.SetWriteDeadline(time.Now().Add(d))
		}
		if err := tlsConn.Handshake(); err != nil {
			// If the handshake failed due to the client not speaking
			// TLS, assume they're speaking plaintext HTTP and write a
			// 400 response on the TLS conn's underlying net.Conn.
			if re, ok := err.(tls.RecordHeaderError); ok && re.Conn != nil && tlsRecordHeaderLooksLikeHTTP(re.RecordHeader) {
				io.WriteString(re.Conn, "HTTP/1.0 400 Bad Request\r\n\r\nClient sent an HTTP request to an HTTPS server.\n")
				re.Conn.Close()
				return
			}
			c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err)
			return
		}
		c.tlsState = new(tls.ConnectionState)
		*c.tlsState = tlsConn.ConnectionState()
		if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) {
			if fn := c.server.TLSNextProto[proto]; fn != nil {
				h := initNPNRequest{ctx, tlsConn, serverHandler{c.server}}
				fn(c.server, tlsConn, h)
			}
			return
		}
	}

	// HTTP/1.x from here on.

	ctx, cancelCtx := context.WithCancel(ctx)
	c.cancelCtx = cancelCtx
	defer cancelCtx()

	c.r = &connReader{conn: c}
	c.bufr = newBufioReader(c.r)
	c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)

	for {
		w, err := c.readRequest(ctx)
		if c.r.remain != c.server.initialReadLimitSize() {
			// If we read any bytes off the wire, we're active.
			c.setState(c.rwc, StateActive)
		}
		if err != nil {
			const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n"

			switch {
			case err == errTooLarge:
				// Their HTTP client may or may not be
				// able to read this if we're
				// responding to them and hanging up
				// while they're still writing their
				// request. Undefined behavior.
				const publicErr = "431 Request Header Fields Too Large"
				fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
				c.closeWriteAndWait()
				return

			case isUnsupportedTEError(err):
				// Respond as per RFC 7230 Section 3.3.1 which says,
				//      A server that receives a request message with a
				//      transfer coding it does not understand SHOULD
				//      respond with 501 (Not Implemented).
				code := StatusNotImplemented

				// We purposefully aren't echoing back the transfer-encoding's value,
				// so as to mitigate the risk of cross-site scripting by an attacker.
				fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s%sUnsupported transfer encoding", code, StatusText(code), errorHeaders)
				return

			case isCommonNetReadError(err):
				return // don't reply

			default:
				publicErr := "400 Bad Request"
				if v, ok := err.(badRequestError); ok {
					publicErr = publicErr + ": " + string(v)
				}

				fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
				return
			}
		}

		// Expect 100 Continue support
		req := w.req
		if req.expectsContinue() {
			if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
				// Wrap the Body reader with one that replies on the connection
				req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
			}
		} else if req.Header.get("Expect") != "" {
			w.sendExpectationFailed()
			return
		}

		c.curReq.Store(w)

		if requestBodyRemains(req.Body) {
			registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead)
		} else {
			w.conn.r.startBackgroundRead()
		}

		// HTTP cannot have multiple simultaneous active requests.[*]
		// Until the server replies to this request, it can't read another,
		// so we might as well run the handler in this goroutine.
		// [*] Not strictly true: HTTP pipelining. We could let them all process
		// in parallel even if their responses need to be serialized.
		// But we're not going to implement HTTP pipelining because it
		// was never deployed in the wild and the answer is HTTP/2.
		serverHandler{c.server}.ServeHTTP(w, w.req)
		w.cancelCtx()
		if c.hijacked() {
			return
		}
		w.finishRequest()
		if !w.shouldReuseConnection() {
			if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
				c.closeWriteAndWait()
			}
			return
		}
		c.setState(c.rwc, StateIdle)
		c.curReq.Store((*response)(nil))

		if !w.conn.server.doKeepAlives() {
			// We're in shutdown mode. We might've replied
			// to the user without "Connection: close" and
			// they might think they can send another
			// request, but such is life with HTTP/1.1.
			return
		}

		if d := c.server.idleTimeout(); d != 0 {
			c.rwc.SetReadDeadline(time.Now().Add(d))
			if _, err := c.bufr.Peek(4); err != nil {
				return
			}
		}
		c.rwc.SetReadDeadline(time.Time{})
	}
}

func (w *response) sendExpectationFailed() {
	// TODO(bradfitz): let ServeHTTP handlers handle
	// requests with non-standard expectation[s]? Seems
	// theoretical at best, and doesn't fit into the
	// current ServeHTTP model anyway. We'd need to
	// make the ResponseWriter an optional
	// "ExpectReplier" interface or something.
	//
	// For now we'll just obey RFC 7231 5.1.1 which says
	// "A server that receives an Expect field-value other
	// than 100-continue MAY respond with a 417 (Expectation
	// Failed) status code to indicate that the unexpected
	// expectation cannot be met."
	w.Header().Set("Connection", "close")
	w.WriteHeader(StatusExpectationFailed)
	w.finishRequest()
}

// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter
// and a Hijacker.
func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
	if w.handlerDone.isSet() {
		panic("net/http: Hijack called after ServeHTTP finished")
	}
	if w.wroteHeader {
		w.cw.flush()
	}

	c := w.conn
	c.mu.Lock()
	defer c.mu.Unlock()

	// Release the bufioWriter that writes to the chunk writer; it is not
	// used after a connection has been hijacked.
	rwc, buf, err = c.hijackLocked()
	if err == nil {
		putBufioWriter(w.w)
		w.w = nil
	}
	return rwc, buf, err
}
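
// An illustrative sketch (not part of this file) of a handler taking over
// the connection via the Hijacker interface; it assumes the usual net/http
// import:
//
//	func takeover(w http.ResponseWriter, r *http.Request) {
//		hj, ok := w.(http.Hijacker)
//		if !ok {
//			http.Error(w, "hijacking not supported", http.StatusInternalServerError)
//			return
//		}
//		conn, bufrw, err := hj.Hijack()
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusInternalServerError)
//			return
//		}
//		defer conn.Close()
//		// From here on the handler owns conn; the server no longer
//		// manages the connection (no keep-alive, no write timeouts).
//		bufrw.WriteString("HTTP/1.1 200 OK\r\nConnection: close\r\n\r\nhijacked\r\n")
//		bufrw.Flush()
//	}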

func (w *response) CloseNotify() <-chan bool {
	if w.handlerDone.isSet() {
		panic("net/http: CloseNotify called after ServeHTTP finished")
	}
	return w.closeNotifyCh
}

func registerOnHitEOF(rc io.ReadCloser, fn func()) {
	switch v := rc.(type) {
	case *expectContinueReader:
		registerOnHitEOF(v.readCloser, fn)
	case *body:
		v.registerOnHitEOF(fn)
	default:
		panic("unexpected type " + fmt.Sprintf("%T", rc))
	}
}

// requestBodyRemains reports whether future calls to Read
// on rc might yield more data.
func requestBodyRemains(rc io.ReadCloser) bool {
	if rc == NoBody {
		return false
	}
	switch v := rc.(type) {
	case *expectContinueReader:
		return requestBodyRemains(v.readCloser)
	case *body:
		return v.bodyRemains()
	default:
		panic("unexpected type " + fmt.Sprintf("%T", rc))
	}
}

// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as HTTP handlers. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler that calls f.
type HandlerFunc func(ResponseWriter, *Request)

// ServeHTTP calls f(w, r).
func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) {
	f(w, r)
}
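
// An illustrative sketch (not part of this file): any function with the
// right signature can be adapted into a Handler; assumes the usual fmt and
// net/http imports:
//
//	greet := func(w http.ResponseWriter, r *http.Request) {
//		fmt.Fprintln(w, "hello")
//	}
//	var h http.Handler = http.HandlerFunc(greet)
//	http.Handle("/greet", h)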

// Helper handlers

// Error replies to the request with the specified error message and HTTP code.
// It does not otherwise end the request; the caller should ensure no further
// writes are done to w.
// The error message should be plain text.
func Error(w ResponseWriter, error string, code int) {
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	w.Header().Set("X-Content-Type-Options", "nosniff")
	w.WriteHeader(code)
	fmt.Fprintln(w, error)
}
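
// An illustrative sketch (not part of this file) of replying with Error and
// returning so that nothing further is written to w; the parameter name is
// made up:
//
//	func lookup(w http.ResponseWriter, r *http.Request) {
//		id := r.URL.Query().Get("id")
//		if id == "" {
//			http.Error(w, "missing id parameter", http.StatusBadRequest)
//			return
//		}
//		fmt.Fprintf(w, "id=%s\n", id)
//	}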

// NotFound replies to the request with an HTTP 404 not found error.
func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) }

// NotFoundHandler returns a simple request handler
// that replies to each request with a ``404 page not found'' reply.
func NotFoundHandler() Handler { return HandlerFunc(NotFound) }

// StripPrefix returns a handler that serves HTTP requests
// by removing the given prefix from the request URL's Path
// and invoking the handler h. StripPrefix handles a
// request for a path that doesn't begin with prefix by
// replying with an HTTP 404 not found error.
func StripPrefix(prefix string, h Handler) Handler {
	if prefix == "" {
		return h
	}
	return HandlerFunc(func(w ResponseWriter, r *Request) {
		if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) {
			r2 := new(Request)
			*r2 = *r
			r2.URL = new(url.URL)
			*r2.URL = *r.URL
			r2.URL.Path = p
			h.ServeHTTP(w, r2)
		} else {
			NotFound(w, r)
		}
	})
}
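
// An illustrative sketch (not part of this file); the URL prefix and
// directory are made up. A request for /static/css/site.css is served
// from /var/www/static/css/site.css:
//
//	http.Handle("/static/", http.StripPrefix("/static/",
//		http.FileServer(http.Dir("/var/www/static"))))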

// Redirect replies to the request with a redirect to url,
// which may be a path relative to the request path.
//
// The provided code should be in the 3xx range and is usually
// StatusMovedPermanently, StatusFound or StatusSeeOther.
//
// If the Content-Type header has not been set, Redirect sets it
// to "text/html; charset=utf-8" and writes a small HTML body.
// Setting the Content-Type header to any value, including nil,
// disables that behavior.
func Redirect(w ResponseWriter, r *Request, url string, code int) {
	// parseURL is just url.Parse (url is shadowed for godoc).
	if u, err := parseURL(url); err == nil {
		// If url was relative, make its path absolute by
		// combining with request path.
		// The client would probably do this for us,
		// but doing it ourselves is more reliable.
		// See RFC 7231, section 7.1.2
		if u.Scheme == "" && u.Host == "" {
			oldpath := r.URL.Path
			if oldpath == "" { // should not happen, but avoid a crash if it does
				oldpath = "/"
			}

			// no leading http://server
			if url == "" || url[0] != '/' {
				// make relative path absolute
				olddir, _ := path.Split(oldpath)
				url = olddir + url
			}

			var query string
			if i := strings.Index(url, "?"); i != -1 {
				url, query = url[:i], url[i:]
			}

			// clean up but preserve trailing slash
			trailing := strings.HasSuffix(url, "/")
			url = path.Clean(url)
			if trailing && !strings.HasSuffix(url, "/") {
				url += "/"
			}
			url += query
		}
	}

	h := w.Header()

	// RFC 7231 notes that a short HTML body is usually included in
	// the response because older user agents may not understand 301/307.
	// Do it only if the request didn't already have a Content-Type header.
	_, hadCT := h["Content-Type"]

	h.Set("Location", hexEscapeNonASCII(url))
	if !hadCT && (r.Method == "GET" || r.Method == "HEAD") {
		h.Set("Content-Type", "text/html; charset=utf-8")
	}
	w.WriteHeader(code)

	// Shouldn't send the body for POST or HEAD; that leaves GET.
	if !hadCT && r.Method == "GET" {
		body := "<a href=\"" + htmlEscape(url) + "\">" + statusText[code] + "</a>.\n"
		fmt.Fprintln(w, body)
	}
}
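
// An illustrative sketch (not part of this file) with a made-up target
// path; a relative target is resolved against the request path as
// described above:
//
//	func moved(w http.ResponseWriter, r *http.Request) {
//		http.Redirect(w, r, "/new-location", http.StatusMovedPermanently)
//	}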

// parseURL is just url.Parse. It exists only so that url.Parse can be called
// in places where url is shadowed for godoc. See https://golang.org/cl/49930.
var parseURL = url.Parse

var htmlReplacer = strings.NewReplacer(
	"&", "&amp;",
	"<", "&lt;",
	">", "&gt;",
	// "&#34;" is shorter than "&quot;".
	`"`, "&#34;",
	// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
	"'", "&#39;",
)

func htmlEscape(s string) string {
	return htmlReplacer.Replace(s)
}

// Redirect to a fixed URL
type redirectHandler struct {
	url  string
	code int
}

func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
	Redirect(w, r, rh.url, rh.code)
}

// RedirectHandler returns a request handler that redirects
// each request it receives to the given url using the given
// status code.
//
// The provided code should be in the 3xx range and is usually
// StatusMovedPermanently, StatusFound or StatusSeeOther.
func RedirectHandler(url string, code int) Handler {
	return &redirectHandler{url, code}
}

// ServeMux is an HTTP request multiplexer.
// It matches the URL of each incoming request against a list of registered
// patterns and calls the handler for the pattern that
// most closely matches the URL.
//
// Patterns name fixed, rooted paths, like "/favicon.ico",
// or rooted subtrees, like "/images/" (note the trailing slash).
// Longer patterns take precedence over shorter ones, so that
// if there are handlers registered for both "/images/"
// and "/images/thumbnails/", the latter handler will be
// called for paths beginning "/images/thumbnails/" and the
// former will receive requests for any other paths in the
// "/images/" subtree.
//
// Note that since a pattern ending in a slash names a rooted subtree,
// the pattern "/" matches all paths not matched by other registered
// patterns, not just the URL with Path == "/".
//
// If a subtree has been registered and a request is received naming the
// subtree root without its trailing slash, ServeMux redirects that
// request to the subtree root (adding the trailing slash). This behavior can
// be overridden with a separate registration for the path without
// the trailing slash. For example, registering "/images/" causes ServeMux
// to redirect a request for "/images" to "/images/", unless "/images" has
// been registered separately.
//
// Patterns may optionally begin with a host name, restricting matches to
// URLs on that host only. Host-specific patterns take precedence over
// general patterns, so that a handler might register for the two patterns
// "/codesearch" and "codesearch.google.com/" without also taking over
// requests for "http://www.google.com/".
//
// ServeMux also takes care of sanitizing the URL request path and the Host
// header, stripping the port number and redirecting any request containing . or
// .. elements or repeated slashes to an equivalent, cleaner URL.
type ServeMux struct {
	mu    sync.RWMutex
	m     map[string]muxEntry
	es    []muxEntry // slice of entries sorted from longest to shortest.
	hosts bool       // whether any patterns contain hostnames
}

type muxEntry struct {
	h       Handler
	pattern string
}

// NewServeMux allocates and returns a new ServeMux.
func NewServeMux() *ServeMux { return new(ServeMux) }

// DefaultServeMux is the default ServeMux used by Serve.
var DefaultServeMux = &defaultServeMux

var defaultServeMux ServeMux

// cleanPath returns the canonical path for p, eliminating . and .. elements.
func cleanPath(p string) string {
	if p == "" {
		return "/"
	}
	if p[0] != '/' {
		p = "/" + p
	}
	np := path.Clean(p)
	// path.Clean removes trailing slash except for root;
	// put the trailing slash back if necessary.
	if p[len(p)-1] == '/' && np != "/" {
		// Fast path for common case of p being the string we want:
		if len(p) == len(np)+1 && strings.HasPrefix(p, np) {
			np = p
		} else {
			np += "/"
		}
	}
	return np
}

// stripHostPort returns h without any trailing ":<port>".
func stripHostPort(h string) string {
	// If no port on host, return unchanged
	if strings.IndexByte(h, ':') == -1 {
		return h
	}
	host, _, err := net.SplitHostPort(h)
	if err != nil {
		return h // on error, return unchanged
	}
	return host
}

// Find a handler on a handler map given a path string.
// Most-specific (longest) pattern wins.
func (mux *ServeMux) match(path string) (h Handler, pattern string) {
	// Check for exact match first.
	v, ok := mux.m[path]
	if ok {
		return v.h, v.pattern
	}

	// Check for longest valid match.  mux.es contains all patterns
	// that end in / sorted from longest to shortest.
	for _, e := range mux.es {
		if strings.HasPrefix(path, e.pattern) {
			return e.h, e.pattern
		}
	}
	return nil, ""
}

// redirectToPathSlash determines whether the given path needs a trailing
// slash appended. This is the case when a handler for path + "/" was
// already registered, but not for path itself. If so, it returns a new URL
// with the path set to u.Path + "/" and reports true.
func (mux *ServeMux) redirectToPathSlash(host, path string, u *url.URL) (*url.URL, bool) {
	mux.mu.RLock()
	shouldRedirect := mux.shouldRedirectRLocked(host, path)
	mux.mu.RUnlock()
	if !shouldRedirect {
		return u, false
	}
	path = path + "/"
	u = &url.URL{Path: path, RawQuery: u.RawQuery}
	return u, true
}

// shouldRedirectRLocked reports whether the given path and host should be redirected to
// path+"/". This should happen if a handler is registered for path+"/" but
// not path -- see comments at ServeMux.
func (mux *ServeMux) shouldRedirectRLocked(host, path string) bool {
	p := []string{path, host + path}

	for _, c := range p {
		if _, exist := mux.m[c]; exist {
			return false
		}
	}

	n := len(path)
	if n == 0 {
		return false
	}
	for _, c := range p {
		if _, exist := mux.m[c+"/"]; exist {
			return path[n-1] != '/'
		}
	}

	return false
}

// Handler returns the handler to use for the given request,
// consulting r.Method, r.Host, and r.URL.Path. It always returns
// a non-nil handler. If the path is not in its canonical form, the
// handler will be an internally-generated handler that redirects
// to the canonical path. If the host contains a port, it is ignored
// when matching handlers.
//
// The path and host are used unchanged for CONNECT requests.
//
// Handler also returns the registered pattern that matches the
// request or, in the case of internally-generated redirects,
// the pattern that will match after following the redirect.
//
// If there is no registered handler that applies to the request,
// Handler returns a ``page not found'' handler and an empty pattern.
func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {

	// CONNECT requests are not canonicalized.
	if r.Method == "CONNECT" {
		// If r.URL.Path is /tree and its handler is not registered,
		// the /tree -> /tree/ redirect applies to CONNECT requests
		// but the path canonicalization does not.
		if u, ok := mux.redirectToPathSlash(r.URL.Host, r.URL.Path, r.URL); ok {
			return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
		}

		return mux.handler(r.Host, r.URL.Path)
	}

	// All other requests have any port stripped and path cleaned
	// before passing to mux.handler.
	host := stripHostPort(r.Host)
	path := cleanPath(r.URL.Path)

	// If the given path is /tree and its handler is not registered,
	// redirect for /tree/.
	if u, ok := mux.redirectToPathSlash(host, path, r.URL); ok {
		return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
	}

	if path != r.URL.Path {
		_, pattern = mux.handler(host, path)
		url := *r.URL
		url.Path = path
		return RedirectHandler(url.String(), StatusMovedPermanently), pattern
	}

	return mux.handler(host, r.URL.Path)
}

// handler is the main implementation of Handler.
// The path is known to be in canonical form, except for CONNECT methods.
func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) {
	mux.mu.RLock()
	defer mux.mu.RUnlock()

	// Host-specific pattern takes precedence over generic ones
	if mux.hosts {
		h, pattern = mux.match(host + path)
	}
	if h == nil {
		h, pattern = mux.match(path)
	}
	if h == nil {
		h, pattern = NotFoundHandler(), ""
	}
	return
}

// ServeHTTP dispatches the request to the handler whose
// pattern most closely matches the request URL.
func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
	if r.RequestURI == "*" {
		if r.ProtoAtLeast(1, 1) {
			w.Header().Set("Connection", "close")
		}
		w.WriteHeader(StatusBadRequest)
		return
	}
	h, _ := mux.Handler(r)
	h.ServeHTTP(w, r)
}

// Handle registers the handler for the given pattern.
// If a handler already exists for pattern, Handle panics.
func (mux *ServeMux) Handle(pattern string, handler Handler) {
	mux.mu.Lock()
	defer mux.mu.Unlock()

	if pattern == "" {
		panic("http: invalid pattern")
	}
	if handler == nil {
		panic("http: nil handler")
	}
	if _, exist := mux.m[pattern]; exist {
		panic("http: multiple registrations for " + pattern)
	}

	if mux.m == nil {
		mux.m = make(map[string]muxEntry)
	}
	e := muxEntry{h: handler, pattern: pattern}
	mux.m[pattern] = e
	if pattern[len(pattern)-1] == '/' {
		mux.es = appendSorted(mux.es, e)
	}

	if pattern[0] != '/' {
		mux.hosts = true
	}
}
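
// An illustrative sketch (not part of this file) of registering exact,
// subtree, and host-specific patterns; the handlers, host, and address are
// made up:
//
//	mux := http.NewServeMux()
//	mux.Handle("/favicon.ico", iconHandler)        // fixed, rooted path
//	mux.Handle("/images/", galleryHandler)         // rooted subtree (trailing slash)
//	mux.Handle("example.com/admin/", adminHandler) // host-specific pattern
//	log.Fatal(http.ListenAndServe(":8080", mux))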

func appendSorted(es []muxEntry, e muxEntry) []muxEntry {
	n := len(es)
	i := sort.Search(n, func(i int) bool {
		return len(es[i].pattern) < len(e.pattern)
	})
	if i == n {
		return append(es, e)
	}
	// we now know that i points at where we want to insert
	es = append(es, muxEntry{}) // try to grow the slice in place, any entry works.
	copy(es[i+1:], es[i:])      // Move shorter entries down
	es[i] = e
	return es
}

// HandleFunc registers the handler function for the given pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
	if handler == nil {
		panic("http: nil handler")
	}
	mux.Handle(pattern, HandlerFunc(handler))
}

// Handle registers the handler for the given pattern
// in the DefaultServeMux.
// The documentation for ServeMux explains how patterns are matched.
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }

// HandleFunc registers the handler function for the given pattern
// in the DefaultServeMux.
// The documentation for ServeMux explains how patterns are matched.
func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
	DefaultServeMux.HandleFunc(pattern, handler)
}

// Serve accepts incoming HTTP connections on the listener l,
// creating a new service goroutine for each. The service goroutines
// read requests and then call handler to reply to them.
//
// The handler is typically nil, in which case the DefaultServeMux is used.
//
// HTTP/2 support is only enabled if the Listener returns *tls.Conn
// connections and they were configured with "h2" in the TLS
// Config.NextProtos.
//
// Serve always returns a non-nil error.
func Serve(l net.Listener, handler Handler) error {
	srv := &Server{Handler: handler}
	return srv.Serve(l)
}

// ServeTLS accepts incoming HTTPS connections on the listener l,
// creating a new service goroutine for each. The service goroutines
// read requests and then call handler to reply to them.
//
// The handler is typically nil, in which case the DefaultServeMux is used.
//
// Additionally, files containing a certificate and matching private key
// for the server must be provided. If the certificate is signed by a
// certificate authority, the certFile should be the concatenation
// of the server's certificate, any intermediates, and the CA's certificate.
//
// ServeTLS always returns a non-nil error.
func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error {
	srv := &Server{Handler: handler}
	return srv.ServeTLS(l, certFile, keyFile)
}

// A Server defines parameters for running an HTTP server.
// The zero value for Server is a valid configuration.
type Server struct {
	Addr    string  // TCP address to listen on, ":http" if empty
	Handler Handler // handler to invoke, http.DefaultServeMux if nil

	// TLSConfig optionally provides a TLS configuration for use
	// by ServeTLS and ListenAndServeTLS. Note that this value is
	// cloned by ServeTLS and ListenAndServeTLS, so it's not
	// possible to modify the configuration with methods like
	// tls.Config.SetSessionTicketKeys. To use
	// SetSessionTicketKeys, use Server.Serve with a TLS Listener
	// instead.
	TLSConfig *tls.Config

	// ReadTimeout is the maximum duration for reading the entire
	// request, including the body.
	//
	// Because ReadTimeout does not let Handlers make per-request
	// decisions on each request body's acceptable deadline or
	// upload rate, most users will prefer to use
	// ReadHeaderTimeout. It is valid to use them both.
	ReadTimeout time.Duration

	// ReadHeaderTimeout is the amount of time allowed to read
	// request headers. The connection's read deadline is reset
	// after reading the headers and the Handler can decide what
	// is considered too slow for the body. If ReadHeaderTimeout
	// is zero, the value of ReadTimeout is used. If both are
	// zero, there is no timeout.
	ReadHeaderTimeout time.Duration

	// WriteTimeout is the maximum duration before timing out
	// writes of the response. It is reset whenever a new
	// request's header is read. Like ReadTimeout, it does not
	// let Handlers make decisions on a per-request basis.
	WriteTimeout time.Duration

	// IdleTimeout is the maximum amount of time to wait for the
	// next request when keep-alives are enabled. If IdleTimeout
	// is zero, the value of ReadTimeout is used. If both are
	// zero, there is no timeout.
	IdleTimeout time.Duration

	// MaxHeaderBytes controls the maximum number of bytes the
	// server will read parsing the request header's keys and
	// values, including the request line. It does not limit the
	// size of the request body.
	// If zero, DefaultMaxHeaderBytes is used.
	MaxHeaderBytes int

	// TLSNextProto optionally specifies a function to take over
	// ownership of the provided TLS connection when an NPN/ALPN
	// protocol upgrade has occurred. The map key is the protocol
	// name negotiated. The Handler argument should be used to
	// handle HTTP requests and will initialize the Request's TLS
	// and RemoteAddr if not already set. The connection is
	// automatically closed when the function returns.
	// If TLSNextProto is not nil, HTTP/2 support is not enabled
	// automatically.
	TLSNextProto map[string]func(*Server, *tls.Conn, Handler)

	// ConnState specifies an optional callback function that is
	// called when a client connection changes state. See the
	// ConnState type and associated constants for details.
	ConnState func(net.Conn, ConnState)

	// ErrorLog specifies an optional logger for errors accepting
	// connections, unexpected behavior from handlers, and
	// underlying FileSystem errors.
	// If nil, logging is done via the log package's standard logger.
	ErrorLog *log.Logger

	// BaseContext optionally specifies a function that returns
	// the base context for incoming requests on this server.
	// The provided Listener is the specific Listener that's
	// about to start accepting requests.
	// If BaseContext is nil, the default is context.Background().
	// If non-nil, it must return a non-nil context.
	BaseContext func(net.Listener) context.Context

	// ConnContext optionally specifies a function that modifies
	// the context used for a new connection c. The provided ctx
	// is derived from the base context and has a ServerContextKey
	// value.
	ConnContext func(ctx context.Context, c net.Conn) context.Context

	disableKeepAlives int32     // accessed atomically.
	inShutdown        int32     // accessed atomically (non-zero means we're in Shutdown)
	nextProtoOnce     sync.Once // guards setupHTTP2_* init
	nextProtoErr      error     // result of http2.ConfigureServer if used

	mu         sync.Mutex
	listeners  map[*net.Listener]struct{}
	activeConn map[*conn]struct{}
	doneChan   chan struct{}
	onShutdown []func()
}
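
// An illustrative sketch (not part of this file) of configuring a Server
// directly rather than using the package-level helpers; the values are
// arbitrary examples and mux is some http.Handler:
//
//	srv := &http.Server{
//		Addr:              ":8080",
//		Handler:           mux,
//		ReadHeaderTimeout: 5 * time.Second,
//		ReadTimeout:       30 * time.Second,
//		WriteTimeout:      30 * time.Second,
//		IdleTimeout:       2 * time.Minute,
//		MaxHeaderBytes:    1 << 20,
//	}
//	log.Fatal(srv.ListenAndServe())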

func (s *Server) getDoneChan() <-chan struct{} {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.getDoneChanLocked()
}

func (s *Server) getDoneChanLocked() chan struct{} {
	if s.doneChan == nil {
		s.doneChan = make(chan struct{})
	}
	return s.doneChan
}

func (s *Server) closeDoneChanLocked() {
	ch := s.getDoneChanLocked()
	select {
	case <-ch:
		// Already closed. Don't close again.
	default:
		// Safe to close here. We're the only closer, guarded
		// by s.mu.
		close(ch)
	}
}

// Close immediately closes all active net.Listeners and any
// connections in state StateNew, StateActive, or StateIdle. For a
// graceful shutdown, use Shutdown.
//
// Close does not attempt to close (and does not even know about)
// any hijacked connections, such as WebSockets.
//
// Close returns any error returned from closing the Server's
// underlying Listener(s).
func (srv *Server) Close() error {
	atomic.StoreInt32(&srv.inShutdown, 1)
	srv.mu.Lock()
	defer srv.mu.Unlock()
	srv.closeDoneChanLocked()
	err := srv.closeListenersLocked()
	for c := range srv.activeConn {
		c.rwc.Close()
		delete(srv.activeConn, c)
	}
	return err
}

// shutdownPollInterval is how often we poll for quiescence
// during Server.Shutdown. This is lower during tests, to
// speed up tests.
// Ideally we could find a solution that doesn't involve polling,
// but which also doesn't have a high runtime cost (and doesn't
// involve any contentious mutexes), but that is left as an
// exercise for the reader.
var shutdownPollInterval = 500 * time.Millisecond

// Shutdown gracefully shuts down the server without interrupting any
// active connections. Shutdown works by first closing all open
// listeners, then closing all idle connections, and then waiting
// indefinitely for connections to return to idle and then shut down.
// If the provided context expires before the shutdown is complete,
// Shutdown returns the context's error, otherwise it returns any
// error returned from closing the Server's underlying Listener(s).
//
// When Shutdown is called, Serve, ListenAndServe, and
// ListenAndServeTLS immediately return ErrServerClosed. Make sure the
// program doesn't exit and waits instead for Shutdown to return.
//
// Shutdown does not attempt to close nor wait for hijacked
// connections such as WebSockets. The caller of Shutdown should
// separately notify such long-lived connections of shutdown and wait
// for them to close, if desired. See RegisterOnShutdown for a way to
// register shutdown notification functions.
//
// Once Shutdown has been called on a server, it may not be reused;
// future calls to methods such as Serve will return ErrServerClosed.
func (srv *Server) Shutdown(ctx context.Context) error {
	atomic.StoreInt32(&srv.inShutdown, 1)

	srv.mu.Lock()
	lnerr := srv.closeListenersLocked()
	srv.closeDoneChanLocked()
	for _, f := range srv.onShutdown {
		go f()
	}
	srv.mu.Unlock()

	ticker := time.NewTicker(shutdownPollInterval)
	defer ticker.Stop()
	for {
		if srv.closeIdleConns() {
			return lnerr
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}
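
// An illustrative sketch (not part of this file) of a graceful shutdown on
// SIGINT; srv is some configured *http.Server and the timeout is arbitrary:
//
//	idleConnsClosed := make(chan struct{})
//	go func() {
//		sigint := make(chan os.Signal, 1)
//		signal.Notify(sigint, os.Interrupt)
//		<-sigint
//		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//		defer cancel()
//		if err := srv.Shutdown(ctx); err != nil {
//			log.Printf("HTTP server Shutdown: %v", err)
//		}
//		close(idleConnsClosed)
//	}()
//	if err := srv.ListenAndServe(); err != http.ErrServerClosed {
//		log.Fatalf("HTTP server ListenAndServe: %v", err)
//	}
//	<-idleConnsClosed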

// RegisterOnShutdown registers a function to call on Shutdown.
// This can be used to gracefully shut down connections that have
// undergone NPN/ALPN protocol upgrade or that have been hijacked.
// This function should start protocol-specific graceful shutdown,
// but should not wait for shutdown to complete.
func (srv *Server) RegisterOnShutdown(f func()) {
	srv.mu.Lock()
	srv.onShutdown = append(srv.onShutdown, f)
	srv.mu.Unlock()
}

// closeIdleConns closes all idle connections and reports whether the
// server is quiescent.
func (s *Server) closeIdleConns() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	quiescent := true
	for c := range s.activeConn {
		st, unixSec := c.getState()
		// Issue 22682: treat StateNew connections as if
		// they're idle if we haven't read the first request's
		// header in over 5 seconds.
		if st == StateNew && unixSec < time.Now().Unix()-5 {
			st = StateIdle
		}
		if st != StateIdle || unixSec == 0 {
			// Assume unixSec == 0 means it's a very new
			// connection, without state set yet.
			quiescent = false
			continue
		}
		c.rwc.Close()
		delete(s.activeConn, c)
	}
	return quiescent
}

func (s *Server) closeListenersLocked() error {
	var err error
	for ln := range s.listeners {
		if cerr := (*ln).Close(); cerr != nil && err == nil {
			err = cerr
		}
		delete(s.listeners, ln)
	}
	return err
}

// A ConnState represents the state of a client connection to a server.
// It's used by the optional Server.ConnState hook.
type ConnState int

const (
	// StateNew represents a new connection that is expected to
	// send a request immediately. Connections begin at this
	// state and then transition to either StateActive or
	// StateClosed.
	StateNew ConnState = iota

	// StateActive represents a connection that has read 1 or more
	// bytes of a request. The Server.ConnState hook for
	// StateActive fires before the request has entered a handler
	// and doesn't fire again until the request has been
	// handled. After the request is handled, the state
	// transitions to StateClosed, StateHijacked, or StateIdle.
	// For HTTP/2, StateActive fires on the transition from zero
	// to one active request, and only transitions away once all
	// active requests are complete. That means that ConnState
	// cannot be used to do per-request work; ConnState only notes
	// the overall state of the connection.
	StateActive

	// StateIdle represents a connection that has finished
	// handling a request and is in the keep-alive state, waiting
	// for a new request. Connections transition from StateIdle
	// to either StateActive or StateClosed.
	StateIdle

	// StateHijacked represents a hijacked connection.
	// This is a terminal state. It does not transition to StateClosed.
	StateHijacked

	// StateClosed represents a closed connection.
	// This is a terminal state. Hijacked connections do not
	// transition to StateClosed.
	StateClosed
)

var stateName = map[ConnState]string{
	StateNew:      "new",
	StateActive:   "active",
	StateIdle:     "idle",
	StateHijacked: "hijacked",
	StateClosed:   "closed",
}

func (c ConnState) String() string {
	return stateName[c]
}

// serverHandler delegates to either the server's Handler or
// DefaultServeMux and also handles "OPTIONS *" requests.
type serverHandler struct {
	srv *Server
}

func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) {
	handler := sh.srv.Handler
	if handler == nil {
		handler = DefaultServeMux
	}
	if req.RequestURI == "*" && req.Method == "OPTIONS" {
		handler = globalOptionsHandler{}
	}
	handler.ServeHTTP(rw, req)
}

// ListenAndServe listens on the TCP network address srv.Addr and then
// calls Serve to handle requests on incoming connections.
// Accepted connections are configured to enable TCP keep-alives.
//
// If srv.Addr is blank, ":http" is used.
//
// ListenAndServe always returns a non-nil error. After Shutdown or Close,
// the returned error is ErrServerClosed.
func (srv *Server) ListenAndServe() error {
	if srv.shuttingDown() {
		return ErrServerClosed
	}
	addr := srv.Addr
	if addr == "" {
		addr = ":http"
	}
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	return srv.Serve(ln)
}

var testHookServerServe func(*Server, net.Listener) // used if non-nil

// shouldConfigureHTTP2ForServe reports whether Server.Serve should configure
// automatic HTTP/2 (which sets up the srv.TLSNextProto map).
func (srv *Server) shouldConfigureHTTP2ForServe() bool {
	if srv.TLSConfig == nil {
		// Compatibility with Go 1.6:
		// If there's no TLSConfig, it's possible that the user just
		// didn't set it on the http.Server, but did pass it to
		// tls.NewListener and passed that listener to Serve.
		// So we should configure HTTP/2 (to set up srv.TLSNextProto)
		// in case the listener returns an "h2" *tls.Conn.
		return true
	}
	// The user specified a TLSConfig on their http.Server.
	// In this case, only configure HTTP/2 if their tls.Config
	// explicitly mentions "h2". Otherwise http2.ConfigureServer
	// would modify the tls.Config to add it, but they probably already
	// passed this tls.Config to tls.NewListener. And if they did,
	// it's too late anyway to fix it. It would only be potentially racy.
	// See Issue 15908.
	return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS)
}

// ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe,
// and ListenAndServeTLS methods after a call to Shutdown or Close.
var ErrServerClosed = errors.New("http: Server closed")

// Serve accepts incoming connections on the Listener l, creating a
// new service goroutine for each. The service goroutines read requests and
// then call srv.Handler to reply to them.
//
// HTTP/2 support is only enabled if the Listener returns *tls.Conn
// connections and they were configured with "h2" in the TLS
// Config.NextProtos.
//
// Serve always returns a non-nil error and closes l.
// After Shutdown or Close, the returned error is ErrServerClosed.
func (srv *Server) Serve(l net.Listener) error {
	if fn := testHookServerServe; fn != nil {
		fn(srv, l) // call hook with unwrapped listener
	}

	origListener := l
	l = &onceCloseListener{Listener: l}
	defer l.Close()

	if err := srv.setupHTTP2_Serve(); err != nil {
		return err
	}

	if !srv.trackListener(&l, true) {
		return ErrServerClosed
	}
	defer srv.trackListener(&l, false)

	var tempDelay time.Duration // how long to sleep on accept failure

	baseCtx := context.Background()
	if srv.BaseContext != nil {
		baseCtx = srv.BaseContext(origListener)
		if baseCtx == nil {
			panic("BaseContext returned a nil context")
		}
	}

	ctx := context.WithValue(baseCtx, ServerContextKey, srv)
	for {
		rw, e := l.Accept()
		if e != nil {
			select {
			case <-srv.getDoneChan():
				return ErrServerClosed
			default:
			}
			if ne, ok := e.(net.Error); ok && ne.Temporary() {
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay *= 2
				}
				if max := 1 * time.Second; tempDelay > max {
					tempDelay = max
				}
				srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay)
				time.Sleep(tempDelay)
				continue
			}
			return e
		}
		if cc := srv.ConnContext; cc != nil {
			ctx = cc(ctx, rw)
			if ctx == nil {
				panic("ConnContext returned nil")
			}
		}
		tempDelay = 0
		c := srv.newConn(rw)
		c.setState(c.rwc, StateNew) // before Serve can return
		go c.serve(ctx)
	}
}

// ServeTLS accepts incoming connections on the Listener l, creating a
// new service goroutine for each. The service goroutines perform TLS
// setup and then read requests, calling srv.Handler to reply to them.
//
// Files containing a certificate and matching private key for the
// server must be provided if neither the Server's
// TLSConfig.Certificates nor TLSConfig.GetCertificate are populated.
// If the certificate is signed by a certificate authority, the
// certFile should be the concatenation of the server's certificate,
// any intermediates, and the CA's certificate.
//
// ServeTLS always returns a non-nil error. After Shutdown or Close, the
// returned error is ErrServerClosed.
func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error {
	// Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig
	// before we clone it and create the TLS Listener.
	if err := srv.setupHTTP2_ServeTLS(); err != nil {
		return err
	}

	config := cloneTLSConfig(srv.TLSConfig)
	if !strSliceContains(config.NextProtos, "http/1.1") {
		config.NextProtos = append(config.NextProtos, "http/1.1")
	}

	configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil
	if !configHasCert || certFile != "" || keyFile != "" {
		var err error
		config.Certificates = make([]tls.Certificate, 1)
		config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			return err
		}
	}

	tlsListener := tls.NewListener(l, config)
	return srv.Serve(tlsListener)
}

// trackListener adds or removes a net.Listener to the set of tracked
// listeners.
//
// We store a pointer to interface in the map set, in case the
// net.Listener is not comparable. This is safe because we only call
// trackListener via Serve and can track+defer untrack the same
// pointer to local variable there. We never need to compare a
// Listener from another caller.
//
// It reports whether the server is still up (not Shutdown or Closed).
func (s *Server) trackListener(ln *net.Listener, add bool) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.listeners == nil {
		s.listeners = make(map[*net.Listener]struct{})
	}
	if add {
		if s.shuttingDown() {
			return false
		}
		s.listeners[ln] = struct{}{}
	} else {
		delete(s.listeners, ln)
	}
	return true
}

func (s *Server) trackConn(c *conn, add bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.activeConn == nil {
		s.activeConn = make(map[*conn]struct{})
	}
	if add {
		s.activeConn[c] = struct{}{}
	} else {
		delete(s.activeConn, c)
	}
}

func (s *Server) idleTimeout() time.Duration {
	if s.IdleTimeout != 0 {
		return s.IdleTimeout
	}
	return s.ReadTimeout
}

func (s *Server) readHeaderTimeout() time.Duration {
	if s.ReadHeaderTimeout != 0 {
		return s.ReadHeaderTimeout
	}
	return s.ReadTimeout
}

func (s *Server) doKeepAlives() bool {
	return atomic.LoadInt32(&s.disableKeepAlives) == 0 && !s.shuttingDown()
}

func (s *Server) shuttingDown() bool {
	// TODO: replace inShutdown with the existing atomicBool type;
	// see https://github.com/golang/go/issues/20239#issuecomment-381434582
	return atomic.LoadInt32(&s.inShutdown) != 0
}

// SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled.
// By default, keep-alives are always enabled. Only very
// resource-constrained environments or servers in the process of
// shutting down should disable them.
func (srv *Server) SetKeepAlivesEnabled(v bool) {
	if v {
		atomic.StoreInt32(&srv.disableKeepAlives, 0)
		return
	}
	atomic.StoreInt32(&srv.disableKeepAlives, 1)

	// Close idle HTTP/1 conns:
	srv.closeIdleConns()

	// TODO: Issue 26303: close HTTP/2 conns as soon as they become idle.
}

func (s *Server) logf(format string, args ...interface{}) {
	if s.ErrorLog != nil {
		s.ErrorLog.Printf(format, args...)
	} else {
		log.Printf(format, args...)
	}
}

// logf prints to the ErrorLog of the *Server associated with request r
// via ServerContextKey. If there's no associated server, or if ErrorLog
// is nil, logging is done via the log package's standard logger.
func logf(r *Request, format string, args ...interface{}) {
	s, _ := r.Context().Value(ServerContextKey).(*Server)
	if s != nil && s.ErrorLog != nil {
		s.ErrorLog.Printf(format, args...)
	} else {
		log.Printf(format, args...)
	}
}

// ListenAndServe listens on the TCP network address addr and then calls
// Serve with handler to handle requests on incoming connections.
// Accepted connections are configured to enable TCP keep-alives.
//
// The handler is typically nil, in which case the DefaultServeMux is used.
//
// ListenAndServe always returns a non-nil error.
func ListenAndServe(addr string, handler Handler) error {
	server := &Server{Addr: addr, Handler: handler}
	return server.ListenAndServe()
}
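
// An illustrative sketch (not part of this file) of the common package-level
// pattern; the route and address are arbitrary:
//
//	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
//		io.WriteString(w, "ok\n")
//	})
//	log.Fatal(http.ListenAndServe(":8080", nil)) // nil handler means DefaultServeMux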

// ListenAndServeTLS acts identically to ListenAndServe, except that it
// expects HTTPS connections. Additionally, files containing a certificate and
// matching private key for the server must be provided. If the certificate
// is signed by a certificate authority, the certFile should be the concatenation
// of the server's certificate, any intermediates, and the CA's certificate.
func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
	server := &Server{Addr: addr, Handler: handler}
	return server.ListenAndServeTLS(certFile, keyFile)
}

// ListenAndServeTLS listens on the TCP network address srv.Addr and
// then calls ServeTLS to handle requests on incoming TLS connections.
// Accepted connections are configured to enable TCP keep-alives.
//
// Filenames containing a certificate and matching private key for the
// server must be provided if neither the Server's TLSConfig.Certificates
// nor TLSConfig.GetCertificate are populated. If the certificate is
// signed by a certificate authority, the certFile should be the
// concatenation of the server's certificate, any intermediates, and
// the CA's certificate.
//
// If srv.Addr is blank, ":https" is used.
//
// ListenAndServeTLS always returns a non-nil error. After Shutdown or
// Close, the returned error is ErrServerClosed.
func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
	if srv.shuttingDown() {
		return ErrServerClosed
	}
	addr := srv.Addr
	if addr == "" {
		addr = ":https"
	}

	ln, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}

	defer ln.Close()

	return srv.ServeTLS(ln, certFile, keyFile)
}

// setupHTTP2_ServeTLS conditionally configures HTTP/2 on
// srv and reports whether there was an error setting it up. If it is
// not configured for policy reasons, nil is returned.
func (srv *Server) setupHTTP2_ServeTLS() error {
	srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults)
	return srv.nextProtoErr
}

// setupHTTP2_Serve is called from (*Server).Serve and conditionally
// configures HTTP/2 on srv using a more conservative policy than
// setupHTTP2_ServeTLS because Serve is called after tls.Listen,
// and may be called concurrently. See shouldConfigureHTTP2ForServe.
//
// The tests named TestTransportAutomaticHTTP2* and
// TestConcurrentServerServe in server_test.go demonstrate some
// of the supported use cases and motivations.
func (srv *Server) setupHTTP2_Serve() error {
	srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve)
	return srv.nextProtoErr
}

func (srv *Server) onceSetNextProtoDefaults_Serve() {
	if srv.shouldConfigureHTTP2ForServe() {
		srv.onceSetNextProtoDefaults()
	}
}

// onceSetNextProtoDefaults configures HTTP/2, if the user hasn't
// configured otherwise. (by setting srv.TLSNextProto non-nil)
// It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*).
func (srv *Server) onceSetNextProtoDefaults() {
	if strings.Contains(os.Getenv("GODEBUG"), "http2server=0") {
		return
	}
	// Enable HTTP/2 by default if the user hasn't otherwise
	// configured their TLSNextProto map.
	if srv.TLSNextProto == nil {
		conf := &http2Server{
			NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) },
		}
		srv.nextProtoErr = http2ConfigureServer(srv, conf)
	}
}
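
// exampleDisableHTTP2 is an illustrative sketch, not part of the net/http API:
// because onceSetNextProtoDefaults only installs the HTTP/2 defaults when
// TLSNextProto is nil, supplying an empty (non-nil) map keeps the server on
// HTTP/1.x over TLS. The address and certificate file names are hypothetical.
func exampleDisableHTTP2() {
	srv := &Server{
		Addr: ":8443",
		// A non-nil map counts as "the user configured otherwise", so the
		// HTTP/2 setup above is skipped.
		TLSNextProto: make(map[string]func(*Server, *tls.Conn, Handler)),
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}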

// TimeoutHandler returns a Handler that runs h with the given time limit.
//
// The new Handler calls h.ServeHTTP to handle each request, but if a
// call runs for longer than its time limit, the handler responds with
// a 503 Service Unavailable error and the given message in its body.
// (If msg is empty, a suitable default message will be sent.)
// After such a timeout, writes by h to its ResponseWriter will return
// ErrHandlerTimeout.
//
// TimeoutHandler supports the Flusher and Pusher interfaces but does not
// support the Hijacker interface.
func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
	return &timeoutHandler{
		handler: h,
		body:    msg,
		dt:      dt,
	}
}
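
// exampleTimeoutHandler is an illustrative sketch, not part of the net/http
// API: it wraps a deliberately slow handler with TimeoutHandler so requests
// taking longer than one second receive a 503 with the given message, and
// later writes by the slow handler fail with ErrHandlerTimeout. The route,
// durations, and message are hypothetical.
func exampleTimeoutHandler() {
	slow := HandlerFunc(func(w ResponseWriter, r *Request) {
		time.Sleep(5 * time.Second) // simulate slow work
		io.WriteString(w, "finally done\n")
	})
	mux := NewServeMux()
	mux.Handle("/slow", TimeoutHandler(slow, 1*time.Second, "request timed out"))
	log.Fatal(ListenAndServe(":8080", mux))
}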

// ErrHandlerTimeout is returned on ResponseWriter Write calls
// in handlers which have timed out.
var ErrHandlerTimeout = errors.New("http: Handler timeout")

type timeoutHandler struct {
	handler Handler
	body    string
	dt      time.Duration

	// When set, no context will be created and this context will
	// be used instead.
	testContext context.Context
}

func (h *timeoutHandler) errorBody() string {
	if h.body != "" {
		return h.body
	}
	return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
}

func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
	ctx := h.testContext
	if ctx == nil {
		var cancelCtx context.CancelFunc
		ctx, cancelCtx = context.WithTimeout(r.Context(), h.dt)
		defer cancelCtx()
	}
	r = r.WithContext(ctx)
	done := make(chan struct{})
	tw := &timeoutWriter{
		w: w,
		h: make(Header),
	}
	panicChan := make(chan interface{}, 1)
	go func() {
		defer func() {
			if p := recover(); p != nil {
				panicChan <- p
			}
		}()
		h.handler.ServeHTTP(tw, r)
		close(done)
	}()
	select {
	case p := <-panicChan:
		panic(p)
	case <-done:
		tw.mu.Lock()
		defer tw.mu.Unlock()
		dst := w.Header()
		for k, vv := range tw.h {
			dst[k] = vv
		}
		if !tw.wroteHeader {
			tw.code = StatusOK
		}
		w.WriteHeader(tw.code)
		w.Write(tw.wbuf.Bytes())
	case <-ctx.Done():
		tw.mu.Lock()
		defer tw.mu.Unlock()
		w.WriteHeader(StatusServiceUnavailable)
		io.WriteString(w, h.errorBody())
		tw.timedOut = true
	}
}

type timeoutWriter struct {
	w    ResponseWriter
	h    Header
	wbuf bytes.Buffer

	mu          sync.Mutex
	timedOut    bool
	wroteHeader bool
	code        int
}

var _ Pusher = (*timeoutWriter)(nil)
var _ Flusher = (*timeoutWriter)(nil)

// Push implements the Pusher interface.
func (tw *timeoutWriter) Push(target string, opts *PushOptions) error {
	if pusher, ok := tw.w.(Pusher); ok {
		return pusher.Push(target, opts)
	}
	return ErrNotSupported
}

// Flush implements the Flusher interface.
func (tw *timeoutWriter) Flush() {
	f, ok := tw.w.(Flusher)
	if ok {
		f.Flush()
	}
}

func (tw *timeoutWriter) Header() Header { return tw.h }

func (tw *timeoutWriter) Write(p []byte) (int, error) {
	tw.mu.Lock()
	defer tw.mu.Unlock()
	if tw.timedOut {
		return 0, ErrHandlerTimeout
	}
	if !tw.wroteHeader {
		tw.writeHeader(StatusOK)
	}
	return tw.wbuf.Write(p)
}

func (tw *timeoutWriter) WriteHeader(code int) {
	checkWriteHeaderCode(code)
	tw.mu.Lock()
	defer tw.mu.Unlock()
	if tw.timedOut || tw.wroteHeader {
		return
	}
	tw.writeHeader(code)
}

func (tw *timeoutWriter) writeHeader(code int) {
	tw.wroteHeader = true
	tw.code = code
}

// onceCloseListener wraps a net.Listener, protecting it from
// multiple Close calls.
type onceCloseListener struct {
	net.Listener
	once     sync.Once
	closeErr error
}

func (oc *onceCloseListener) Close() error {
	oc.once.Do(oc.close)
	return oc.closeErr
}

func (oc *onceCloseListener) close() { oc.closeErr = oc.Listener.Close() }
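
// exampleOnceCloseListener is an illustrative sketch, not part of the net/http
// API: it demonstrates the wrapper above, which lets multiple code paths
// (for example a defer and a shutdown path) call Close without double-closing
// the underlying listener; every call reports the same recorded error.
func exampleOnceCloseListener() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	ocl := &onceCloseListener{Listener: ln}
	err1 := ocl.Close() // actually closes the listener
	err2 := ocl.Close() // no-op; returns the same recorded result
	log.Printf("first=%v second=%v", err1, err2)
}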

// globalOptionsHandler responds to "OPTIONS *" requests.
type globalOptionsHandler struct{}

func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) {
	w.Header().Set("Content-Length", "0")
	if r.ContentLength != 0 {
		// Read up to 4KB of OPTIONS body (as mentioned in the
		// spec as being reserved for future use), but anything
		// over that is considered a waste of server resources
		// (or an attack) and we abort and close the connection,
		// courtesy of MaxBytesReader's EOF behavior.
		mb := MaxBytesReader(w, r.Body, 4<<10)
		io.Copy(ioutil.Discard, mb)
	}
}
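
// exampleMaxBytesBody is an illustrative sketch, not part of the net/http API:
// it applies the same MaxBytesReader technique used by globalOptionsHandler to
// cap how much of a request body a handler will read. The 1 MB limit and the
// responses are hypothetical.
func exampleMaxBytesBody(w ResponseWriter, r *Request) {
	r.Body = MaxBytesReader(w, r.Body, 1<<20) // cap reads at 1 MB
	if _, err := io.Copy(ioutil.Discard, r.Body); err != nil {
		// Reading past the limit returns an error, and the server will
		// close the connection after the reply.
		Error(w, "request body too large", StatusRequestEntityTooLarge)
		return
	}
	w.WriteHeader(StatusOK)
}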

// initNPNRequest is an HTTP handler that initializes certain
// uninitialized fields in its *Request. Such partially-initialized
// Requests come from NPN protocol handlers.
type initNPNRequest struct {
	ctx context.Context
	c   *tls.Conn
	h   serverHandler
}

// BaseContext is an exported but unadvertised http.Handler method
// recognized by x/net/http2 to pass down a context; the TLSNextProto
// API predates context support so we shoehorn through the only
// interface we have available.
func (h initNPNRequest) BaseContext() context.Context { return h.ctx }

func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) {
	if req.TLS == nil {
		req.TLS = &tls.ConnectionState{}
		*req.TLS = h.c.ConnectionState()
	}
	if req.Body == nil {
		req.Body = NoBody
	}
	if req.RemoteAddr == "" {
		req.RemoteAddr = h.c.RemoteAddr().String()
	}
	h.h.ServeHTTP(rw, req)
}

// loggingConn is used for debugging.
type loggingConn struct {
	name string
	net.Conn
}

var (
	uniqNameMu   sync.Mutex
	uniqNameNext = make(map[string]int)
)

func newLoggingConn(baseName string, c net.Conn) net.Conn {
	uniqNameMu.Lock()
	defer uniqNameMu.Unlock()
	uniqNameNext[baseName]++
	return &loggingConn{
		name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]),
		Conn: c,
	}
}

func (c *loggingConn) Write(p []byte) (n int, err error) {
	log.Printf("%s.Write(%d) = ....", c.name, len(p))
	n, err = c.Conn.Write(p)
	log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err)
	return
}

func (c *loggingConn) Read(p []byte) (n int, err error) {
	log.Printf("%s.Read(%d) = ....", c.name, len(p))
	n, err = c.Conn.Read(p)
	log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err)
	return
}

func (c *loggingConn) Close() (err error) {
	log.Printf("%s.Close() = ...", c.name)
	err = c.Conn.Close()
	log.Printf("%s.Close() = %v", c.name, err)
	return
}

// checkConnErrorWriter writes to c.rwc and records any write errors to c.werr.
// It only contains one field (and a pointer field at that), so it
// fits in an interface value without an extra allocation.
type checkConnErrorWriter struct {
	c *conn
}

func (w checkConnErrorWriter) Write(p []byte) (n int, err error) {
	n, err = w.c.rwc.Write(p)
	if err != nil && w.c.werr == nil {
		w.c.werr = err
		w.c.cancelCtx()
	}
	return
}

func numLeadingCRorLF(v []byte) (n int) {
	for _, b := range v {
		if b == '\r' || b == '\n' {
			n++
			continue
		}
		break
	}
	return
}

func strSliceContains(ss []string, s string) bool {
	for _, v := range ss {
		if v == s {
			return true
		}
	}
	return false
}

// tlsRecordHeaderLooksLikeHTTP reports whether a TLS record header
// looks like it might've been a misdirected plaintext HTTP request.
func tlsRecordHeaderLooksLikeHTTP(hdr [5]byte) bool {
	switch string(hdr[:]) {
	case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO":
		return true
	}
	return false
}