// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"errors"
	"fmt"
	"sync"
)

// Buffer chunks are allocated from a pool to reduce pressure on GC.
// The maximum wasted space per dataBuffer is 2x the largest size class,
// which happens when the dataBuffer has multiple chunks and there is
// one unread byte in both the first and last chunks. We use a few size
// classes to minimize overheads for servers that typically receive very
// small request bodies.
//
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var dataChunkPools = [...]sync.Pool{
	{New: func() interface{} { return new([1 << 10]byte) }},
	{New: func() interface{} { return new([2 << 10]byte) }},
	{New: func() interface{} { return new([4 << 10]byte) }},
	{New: func() interface{} { return new([8 << 10]byte) }},
	{New: func() interface{} { return new([16 << 10]byte) }},
}

// getDataBufferChunk returns a pooled chunk from the smallest size class
// that can hold size bytes.
func getDataBufferChunk(size int64) []byte {
	switch {
	case size <= 1<<10:
		return dataChunkPools[0].Get().(*[1 << 10]byte)[:]
	case size <= 2<<10:
		return dataChunkPools[1].Get().(*[2 << 10]byte)[:]
	case size <= 4<<10:
		return dataChunkPools[2].Get().(*[4 << 10]byte)[:]
	case size <= 8<<10:
		return dataChunkPools[3].Get().(*[8 << 10]byte)[:]
	default:
		return dataChunkPools[4].Get().(*[16 << 10]byte)[:]
	}
}

// putDataBufferChunk returns a chunk to the pool matching its size class.
func putDataBufferChunk(p []byte) {
	switch len(p) {
	case 1 << 10:
		dataChunkPools[0].Put((*[1 << 10]byte)(p))
	case 2 << 10:
		dataChunkPools[1].Put((*[2 << 10]byte)(p))
	case 4 << 10:
		dataChunkPools[2].Put((*[4 << 10]byte)(p))
	case 8 << 10:
		dataChunkPools[3].Put((*[8 << 10]byte)(p))
	case 16 << 10:
		dataChunkPools[4].Put((*[16 << 10]byte)(p))
	default:
		panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
	}
}

// dataBuffer is an io.ReadWriter backed by a list of data chunks.
// Each dataBuffer is used to read DATA frames on a single stream.
// The buffer is divided into chunks so the server can limit the
// total memory used by a single connection without limiting the
// request body size on any single stream.
type dataBuffer struct {
	chunks   [][]byte
	r        int   // next byte to read is chunks[0][r]
	w        int   // next byte to write is chunks[len(chunks)-1][w]
	size     int   // total buffered bytes
	expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
}

var errReadEmpty = errors.New("read from empty dataBuffer")

// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *dataBuffer) Read(p []byte) (int, error) {
	if b.size == 0 {
		return 0, errReadEmpty
	}
	var ntotal int
	for len(p) > 0 && b.size > 0 {
		readFrom := b.bytesFromFirstChunk()
		n := copy(p, readFrom)
		p = p[n:]
		ntotal += n
		b.r += n
		b.size -= n
		// If the first chunk has been consumed, advance to the next chunk.
		if b.r == len(b.chunks[0]) {
			putDataBufferChunk(b.chunks[0])
			end := len(b.chunks) - 1
			copy(b.chunks[:end], b.chunks[1:])
			b.chunks[end] = nil
			b.chunks = b.chunks[:end]
			b.r = 0
		}
	}
	return ntotal, nil
}

// bytesFromFirstChunk returns the unread bytes of the first chunk.
func (b *dataBuffer) bytesFromFirstChunk() []byte {
	if len(b.chunks) == 1 {
		return b.chunks[0][b.r:b.w]
	}
	return b.chunks[0][b.r:]
}

// Len returns the number of bytes of the unread portion of the buffer.
func (b *dataBuffer) Len() int {
	return b.size
}

// Write appends p to the buffer.
func (b *dataBuffer) Write(p []byte) (int, error) {
	ntotal := len(p)
	for len(p) > 0 {
		// If the last chunk is empty, allocate a new chunk. Try to allocate
		// enough to fully copy p plus any additional bytes we expect to
		// receive. However, this may allocate less than len(p).
		want := int64(len(p))
		if b.expected > want {
			want = b.expected
		}
		chunk := b.lastChunkOrAlloc(want)
		n := copy(chunk[b.w:], p)
		p = p[n:]
		b.w += n
		b.size += n
		b.expected -= int64(n)
	}
	return ntotal, nil
}

// lastChunkOrAlloc returns the last chunk if it still has free space;
// otherwise it allocates a new chunk sized for at least want bytes.
func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
	if len(b.chunks) != 0 {
		last := b.chunks[len(b.chunks)-1]
		if b.w < len(last) {
			return last
		}
	}
	chunk := getDataBufferChunk(want)
	b.chunks = append(b.chunks, chunk)
	b.w = 0
	return chunk
}
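
// The function below is an illustrative sketch, not part of the original
// file: it shows the intended Write/Read cycle of a dataBuffer from inside
// package http2. The name exampleDataBufferUsage and the 3 KB sizes are
// assumptions chosen for illustration only.
func exampleDataBufferUsage() {
	// Hint that roughly 3 KB of DATA is expected, so the first Write can
	// allocate a single 4 KB chunk instead of several smaller ones.
	buf := &dataBuffer{expected: 3 << 10}
	buf.Write(make([]byte, 3<<10)) // stored in one pooled 4 KB chunk

	// Drain the buffer in 1 KB reads; Len reports the unread bytes.
	out := make([]byte, 1<<10)
	for buf.Len() > 0 {
		n, _ := buf.Read(out)
		_ = out[:n] // process n bytes
	}
	// A further Read now returns errReadEmpty because the buffer is empty.
}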
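
// Another illustrative sketch, not part of the original file: it shows how
// the chunk pool helpers pick a size class. The function name
// exampleChunkPoolUsage and the requested size are assumptions.
func exampleChunkPoolUsage() {
	chunk := getDataBufferChunk(3000) // 3000 <= 4<<10, so a 4 KB chunk is returned
	_ = len(chunk)                    // always 4096 for this size class
	putDataBufferChunk(chunk)         // recycled into dataChunkPools[2]
}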