Source File: chunked.go
Package: net/http/internal
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// The wire protocol for HTTP's "chunked" Transfer-Encoding.

// Package internal contains HTTP internals shared by net/http and
// net/http/httputil.
package internal

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
)

const maxLineLength = 4096 // assumed <= bufio.defaultBufSize

var ErrLineTooLong = errors.New("header line too long")
// NewChunkedReader returns a new chunkedReader that translates the data read from r
// out of HTTP "chunked" format before returning it.
// The chunkedReader returns [io.EOF] when the final 0-length chunk is read.
//
// NewChunkedReader is not needed by normal applications. The http package
// automatically decodes chunking when reading response bodies.
func NewChunkedReader(r io.Reader) io.Reader {
	br, ok := r.(*bufio.Reader)
	if !ok {
		br = bufio.NewReader(r)
	}
	return &chunkedReader{r: br}
}
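// Illustrative usage (not part of the original file; a minimal sketch, noting
// that this package is internal and normally reached through the exported
// net/http/httputil.NewChunkedReader wrapper):
//
//	r := NewChunkedReader(strings.NewReader("5\r\nhello\r\n0\r\n"))
//	body, err := io.ReadAll(r)
//	// body == []byte("hello"); err == nil, since the final 0-length
//	// chunk surfaces as io.EOF, which io.ReadAll treats as success.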
type chunkedReader struct {
	r        *bufio.Reader
	n        uint64 // unread bytes in chunk
	err      error
	buf      [2]byte
	checkEnd bool  // whether need to check for \r\n chunk footer
	excess   int64 // "excessive" chunk overhead, for malicious sender detection
}
func (cr *chunkedReader) beginChunk() {
	// chunk-size CRLF
	var line []byte
	line, cr.err = readChunkLine(cr.r)
	if cr.err != nil {
		return
	}
	cr.excess += int64(len(line)) + 2 // header, plus \r\n after the chunk data
	line = trimTrailingWhitespace(line)
	line, cr.err = removeChunkExtension(line)
	if cr.err != nil {
		return
	}
	cr.n, cr.err = parseHexUint(line)
	if cr.err != nil {
		return
	}
	// A sender who sends one byte per chunk will send 5 bytes of overhead
	// for every byte of data. ("1\r\nX\r\n" to send "X".)
	// We want to allow this, since streaming a byte at a time can be legitimate.
	//
	// A sender can use chunk extensions to add arbitrary amounts of additional
	// data per byte read. ("1;very long extension\r\nX\r\n" to send "X".)
	// We don't want to disallow extensions (although we discard them),
	// but we also don't want to allow a sender to reduce the signal/noise ratio
	// arbitrarily.
	//
	// We track the amount of excess overhead read,
	// and produce an error if it grows too large.
	//
	// Currently, we say that we're willing to accept 16 bytes of overhead per chunk,
	// plus twice the amount of real data in the chunk.
	cr.excess -= 16 + (2 * int64(cr.n))
	cr.excess = max(cr.excess, 0)
	if cr.excess > 16*1024 {
		cr.err = errors.New("chunked encoding contains too much non-data")
	}
	if cr.n == 0 {
		cr.err = io.EOF
	}
}
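// Worked example of the accounting above (added for illustration, not part of
// the original file): for the chunk "1;ext\r\nX\r\n", readChunkLine returns
// the 5-byte line "1;ext", so excess grows by 5+2 = 7 and then shrinks by
// 16 + 2*1 = 18, clamping back to 0. Only streams padded far beyond that
// per-chunk allowance accumulate toward the 16 KiB limit and fail.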
func (cr *chunkedReader) chunkHeaderAvailable() bool {
	n := cr.r.Buffered()
	if n > 0 {
		peek, _ := cr.r.Peek(n)
		return bytes.IndexByte(peek, '\n') >= 0
	}
	return false
}
func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
	for cr.err == nil {
		if cr.checkEnd {
			if n > 0 && cr.r.Buffered() < 2 {
				// We have some data. Return early (per the io.Reader
				// contract) instead of potentially blocking while
				// reading more.
				break
			}
			if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil {
				if string(cr.buf[:]) != "\r\n" {
					cr.err = errors.New("malformed chunked encoding")
					break
				}
			} else {
				if cr.err == io.EOF {
					cr.err = io.ErrUnexpectedEOF
				}
				break
			}
			cr.checkEnd = false
		}
		if cr.n == 0 {
			if n > 0 && !cr.chunkHeaderAvailable() {
				// We've read enough. Don't potentially block
				// reading a new chunk header.
				break
			}
			cr.beginChunk()
			continue
		}
		if len(b) == 0 {
			break
		}
		rbuf := b
		if uint64(len(rbuf)) > cr.n {
			rbuf = rbuf[:cr.n]
		}
		var n0 int
		n0, cr.err = cr.r.Read(rbuf)
		n += n0
		b = b[n0:]
		cr.n -= uint64(n0)
		// If we're at the end of a chunk, read the next two
		// bytes to verify they are "\r\n".
		if cr.n == 0 && cr.err == nil {
			cr.checkEnd = true
		} else if cr.err == io.EOF {
			cr.err = io.ErrUnexpectedEOF
		}
	}
	return n, cr.err
}
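// Behavior sketch (added for illustration, not part of the original file):
// reading "3\r\nabc\r\n0\r\n" through a 2-byte buffer first returns
// ("ab", nil), then ("c", nil); once cr.n reaches 0 the reader sets checkEnd
// and verifies the trailing "\r\n" before beginning the next chunk, and the
// final 0-length chunk makes a later call return (0, io.EOF).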
// Read a line of bytes (up to \n) from b.
// Give up if the line exceeds maxLineLength.
// The returned bytes are owned by the bufio.Reader
// so they are only valid until the next bufio read.
func readChunkLine(b *bufio.Reader) ([]byte, error) {
	p, err := b.ReadSlice('\n')
	if err != nil {
		// We always know when EOF is coming.
		// If the caller asked for a line, there should be a line.
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		} else if err == bufio.ErrBufferFull {
			err = ErrLineTooLong
		}
		return nil, err
	}

	// RFC 9112 permits parsers to accept a bare \n as a line ending in headers,
	// but not in chunked encoding lines. See https://www.rfc-editor.org/errata/eid7633,
	// which explicitly rejects a clarification permitting \n as a chunk terminator.
	//
	// Verify that the line ends in a CRLF, and that no CRs appear before the end.
	if idx := bytes.IndexByte(p, '\r'); idx == -1 {
		return nil, errors.New("chunked line ends with bare LF")
	} else if idx != len(p)-2 {
		return nil, errors.New("invalid CR in chunked line")
	}
	p = p[:len(p)-2] // trim CRLF

	if len(p) >= maxLineLength {
		return nil, ErrLineTooLong
	}
	return p, nil
}
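// Examples of the validation above (added for illustration, not part of the
// original file):
//
//	"5\r\n"  => "5", nil   (CRLF trimmed)
//	"5\n"    => error      ("chunked line ends with bare LF")
//	"5\rX\n" => error      ("invalid CR in chunked line")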
func trimTrailingWhitespace(b []byte) []byte {
	for len(b) > 0 && isOWS(b[len(b)-1]) {
		b = b[:len(b)-1]
	}
	return b
}

func isOWS(b byte) bool {
	return b == ' ' || b == '\t'
}
var semi = []byte(";")
// removeChunkExtension removes any chunk-extension from p.
// For example,
//
//	"0" => "0"
//	"0;token" => "0"
//	"0;token=val" => "0"
//	`0;token="quoted string"` => "0"
func removeChunkExtension(p []byte) ([]byte, error) {
	p, _, _ = bytes.Cut(p, semi)
	// TODO: care about exact syntax of chunk extensions? We're
	// ignoring and stripping them anyway. For now just never
	// return an error.
	return p, nil
}
// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP
// "chunked" format before writing them to w. Closing the returned chunkedWriter
// sends the final 0-length chunk that marks the end of the stream but does
// not send the final CRLF that appears after trailers; trailers and the last
// CRLF must be written separately.
//
// NewChunkedWriter is not needed by normal applications. The http
// package adds chunking automatically if handlers don't set a
// Content-Length header. Using NewChunkedWriter inside a handler
// would result in double chunking or chunking with a Content-Length
// header, both of which are wrong.
func NewChunkedWriter(w io.Writer) io.WriteCloser {
	return &chunkedWriter{w}
}
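// Illustrative usage (not part of the original file; a minimal sketch, noting
// that the exported wrapper is net/http/httputil.NewChunkedWriter):
//
//	var buf bytes.Buffer
//	w := NewChunkedWriter(&buf)
//	w.Write([]byte("hello"))
//	w.Close()
//	// buf now holds "5\r\nhello\r\n0\r\n". The final CRLF that follows
//	// any trailers must still be written by the caller.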
// Writing to chunkedWriter translates to writing in HTTP chunked Transfer
// Encoding wire format to the underlying Wire writer.
type chunkedWriter struct {
	Wire io.Writer
}
// Write the contents of data as one chunk to Wire.
// NOTE: the corresponding chunk-writing procedure in Conn.Write has
// a bug since it does not check for success of [io.WriteString].
func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
	// Don't send 0-length data. It looks like EOF for chunked encoding.
	if len(data) == 0 {
		return 0, nil
	}

	if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil {
		return 0, err
	}
	if n, err = cw.Wire.Write(data); err != nil {
		return
	}
	if n != len(data) {
		err = io.ErrShortWrite
		return
	}
	if _, err = io.WriteString(cw.Wire, "\r\n"); err != nil {
		return
	}
	if bw, ok := cw.Wire.(*FlushAfterChunkWriter); ok {
		err = bw.Flush()
	}
	return
}
func (cw *chunkedWriter) Close() error {
	_, err := io.WriteString(cw.Wire, "0\r\n")
	return err
}
// FlushAfterChunkWriter signals from the caller of [NewChunkedWriter]
// that each chunk should be followed by a flush. It is used by the
// [net/http.Transport] code to keep the buffering behavior for headers and
// trailers, but flush out chunks aggressively in the middle for
// request bodies which may be generated slowly. See Issue 6574.
type FlushAfterChunkWriter struct {
	*bufio.Writer
}
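// Illustrative usage (not part of the original file; a minimal sketch,
// assuming bw is a *bufio.Writer wrapping the connection): because
// chunkedWriter.Write type-asserts its Wire field, the flushing behavior is
// enabled by passing the wrapper itself as the destination:
//
//	w := NewChunkedWriter(&FlushAfterChunkWriter{Writer: bw})
//	// every chunk written through w is now flushed to bw immediately.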
func parseHexUint(v []byte) (n uint64, err error) {
	if len(v) == 0 {
		return 0, errors.New("empty hex number for chunk length")
	}
	for i, b := range v {
		switch {
		case '0' <= b && b <= '9':
			b = b - '0'
		case 'a' <= b && b <= 'f':
			b = b - 'a' + 10
		case 'A' <= b && b <= 'F':
			b = b - 'A' + 10
		default:
			return 0, errors.New("invalid byte in chunk length")
		}
		if i == 16 {
			return 0, errors.New("http chunk length too large")
		}
		n <<= 4
		n |= uint64(b)
	}
	return
}
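// Examples (added for illustration, not part of the original file):
//
//	parseHexUint([]byte("5"))                 // => 5, nil
//	parseHexUint([]byte("FF"))                // => 255, nil
//	parseHexUint([]byte("zz"))                // => 0, "invalid byte in chunk length"
//	parseHexUint([]byte("11111111111111111")) // 17 digits => "http chunk length too large"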