Source File: tracebuf.go
Belonging Package: runtime
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Trace buffer management.

package runtime

import (
    "internal/runtime/sys"
    "unsafe"
)

// Maximum number of bytes required to encode uint64 in base-128.
const traceBytesPerNumber = 10

// traceWriter is the interface for writing all trace data.
//
// This type is passed around as a value, and all of its methods return
// a new traceWriter. This allows for chaining together calls in a fluent-style
// API. This is partly stylistic, and very slightly for performance, since
// the compiler can destructure this value and pass it between calls as
// just regular arguments. However, this style is not load-bearing, and
// we can change it if it's deemed too error-prone.
type traceWriter struct {
    traceLocker
    exp traceExperiment
    *traceBuf
}

// writer returns a traceWriter that writes into the current M's stream.
//
// Once this is called, the caller must guard against stack growth until
// end is called on it. Therefore, it's highly recommended to use this
// API in a "fluent" style, for example tl.writer().event(...).end().
// Better yet, callers just looking to write events should use eventWriter
// when possible, which is a much safer wrapper around this function.
//
// nosplit to allow for safe reentrant tracing from stack growth paths.
//
//go:nosplit
func (tl traceLocker) writer() traceWriter {
    if debugTraceReentrancy {
        // Checks that the invariants of this function are being upheld.
        gp := getg()
        if gp == gp.m.curg {
            tl.mp.trace.oldthrowsplit = gp.throwsplit
            gp.throwsplit = true
        }
    }
    return traceWriter{traceLocker: tl, traceBuf: tl.mp.trace.buf[tl.gen%2][traceNoExperiment]}
}

// unsafeTraceWriter produces a traceWriter that doesn't lock the trace.
//
// It should only be used in contexts where either:
// - Another traceLocker is held.
// - trace.gen is prevented from advancing.
//
// This does not have the same stack growth restrictions as traceLocker.writer.
//
// buf may be nil.
func unsafeTraceWriter(gen uintptr, buf *traceBuf) traceWriter {
    return traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf}
}

// event writes out the bytes of an event into the event stream.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (w traceWriter) event(ev traceEv, args ...traceArg) traceWriter {
    // N.B. Everything in this call must be nosplit to maintain
    // the stack growth related invariants for writing events.

    // Make sure we have room.
    w, _ = w.ensure(1 + (len(args)+1)*traceBytesPerNumber)

    // Compute the timestamp diff that we'll put in the trace.
    ts := traceClockNow()
    if ts <= w.traceBuf.lastTime {
        ts = w.traceBuf.lastTime + 1
    }
    tsDiff := uint64(ts - w.traceBuf.lastTime)
    w.traceBuf.lastTime = ts

    // Write out event.
    w.byte(byte(ev))
    w.varint(tsDiff)
    for _, arg := range args {
        w.varint(uint64(arg))
    }
    return w
}

// end writes the buffer back into the m.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (w traceWriter) end() {
    if w.mp == nil {
        // Tolerate a nil mp. It makes code that creates traceWriters directly
        // less error-prone.
        return
    }
    w.mp.trace.buf[w.gen%2][w.exp] = w.traceBuf
    if debugTraceReentrancy {
        // The writer is no longer live, we can drop throwsplit (if it wasn't
        // already set upon entry).
        gp := getg()
        if gp == gp.m.curg {
            gp.throwsplit = w.mp.trace.oldthrowsplit
        }
    }
}
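
To make the on-the-wire shape of traceWriter.event concrete, the following is a minimal, self-contained sketch (not part of tracebuf.go): a one-byte event type, followed by the timestamp delta and each argument as little-endian base-128 varints. The appendVarint helper and all sample values are illustrative only; the runtime writes through traceBuf.byte and traceBuf.varint instead.

package main

import "fmt"

// appendVarint is an illustrative little-endian base-128 encoder,
// mirroring the byte layout traceBuf.varint produces for each value.
func appendVarint(b []byte, v uint64) []byte {
    for v >= 0x80 {
        b = append(b, 0x80|byte(v))
        v >>= 7
    }
    return append(b, byte(v))
}

func main() {
    const evType = 22          // hypothetical event type byte
    var lastTime uint64 = 1000 // timestamp of the previous event in this batch
    var now uint64 = 1037      // current trace clock reading
    args := []uint64{3, 42}    // hypothetical event arguments

    // An event is the event type byte, then the timestamp delta, then the
    // arguments, everything after the first byte as varints (mirrors
    // traceWriter.event above).
    var event []byte
    event = append(event, byte(evType))
    event = appendVarint(event, now-lastTime)
    for _, a := range args {
        event = appendVarint(event, a)
    }
    fmt.Printf("% x\n", event) // 16 25 03 2a
}
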
// ensure makes sure that at least maxSize bytes are available to write.
//
// Returns whether the buffer was flushed.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (w traceWriter) ensure(maxSize int) (traceWriter, bool) {
    refill := w.traceBuf == nil || !w.available(maxSize)
    if refill {
        w = w.refill()
    }
    return w, refill
}

// flush puts w.traceBuf on the queue of full buffers.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (w traceWriter) flush() traceWriter {
    systemstack(func() {
        lock(&trace.lock)
        if w.traceBuf != nil {
            traceBufFlush(w.traceBuf, w.gen)
        }
        unlock(&trace.lock)
    })
    w.traceBuf = nil
    return w
}

// refill puts w.traceBuf on the queue of full buffers and refreshes w's buffer.
func (w traceWriter) refill() traceWriter {
    systemstack(func() {
        lock(&trace.lock)
        if w.traceBuf != nil {
            traceBufFlush(w.traceBuf, w.gen)
        }
        if trace.empty != nil {
            w.traceBuf = trace.empty
            trace.empty = w.traceBuf.link
            unlock(&trace.lock)
        } else {
            unlock(&trace.lock)
            w.traceBuf = (*traceBuf)(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
            if w.traceBuf == nil {
                throw("trace: out of memory")
            }
        }
    })

    // Initialize the buffer.
    ts := traceClockNow()
    if ts <= w.traceBuf.lastTime {
        ts = w.traceBuf.lastTime + 1
    }
    w.traceBuf.lastTime = ts
    w.traceBuf.link = nil
    w.traceBuf.pos = 0

    // Tolerate a nil mp.
    mID := ^uint64(0)
    if w.mp != nil {
        mID = uint64(w.mp.procid)
    }

    // Write the buffer's header.
    if w.exp == traceNoExperiment {
        w.byte(byte(traceEvEventBatch))
    } else {
        w.byte(byte(traceEvExperimentalBatch))
        w.byte(byte(w.exp))
    }
    w.varint(uint64(w.gen))
    w.varint(uint64(mID))
    w.varint(uint64(ts))
    w.traceBuf.lenPos = w.varintReserve()
    return w
}

// traceBufQueue is a FIFO of traceBufs.
type traceBufQueue struct {
    head, tail *traceBuf
}

// push queues buf into queue of buffers.
func (q *traceBufQueue) push(buf *traceBuf) {
    buf.link = nil
    if q.head == nil {
        q.head = buf
    } else {
        q.tail.link = buf
    }
    q.tail = buf
}

// pop dequeues from the queue of buffers.
func (q *traceBufQueue) pop() *traceBuf {
    buf := q.head
    if buf == nil {
        return nil
    }
    q.head = buf.link
    if q.head == nil {
        q.tail = nil
    }
    buf.link = nil
    return buf
}

func (q *traceBufQueue) empty() bool {
    return q.head == nil
}

// traceBufHeader is the header of a per-M tracing buffer.
type traceBufHeader struct {
    link     *traceBuf // in trace.empty/full
    lastTime traceTime // when we wrote the last event
    pos      int       // next write offset in arr
    lenPos   int       // position of batch length value
}

// traceBuf is a per-M tracing buffer.
//
// TODO(mknyszek): Rename traceBuf to traceBatch, since they map 1:1 with event batches.
type traceBuf struct {
    _ sys.NotInHeap
    traceBufHeader
    arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}

// byte appends v to buf.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) byte(v byte) {
    buf.arr[buf.pos] = v
    buf.pos++
}

// varint appends v to buf in little-endian-base-128 encoding.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) varint(v uint64) {
    pos := buf.pos
    arr := buf.arr[pos : pos+traceBytesPerNumber]
    for i := range arr {
        if v < 0x80 {
            pos += i + 1
            arr[i] = byte(v)
            break
        }
        arr[i] = 0x80 | byte(v)
        v >>= 7
    }
    buf.pos = pos
}
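
The base-128 ("varint") encoding used by traceBuf.varint stores seven payload bits per byte, low-order group first, and sets the high bit on every byte except the last. A standalone sketch of both directions follows; the decodeVarint helper is illustrative (the runtime's trace reader lives elsewhere):

package main

import "fmt"

// encodeVarint mirrors the output of traceBuf.varint: little-endian base-128,
// with the continuation bit (0x80) set on all but the final byte.
func encodeVarint(v uint64) []byte {
    var b []byte
    for v >= 0x80 {
        b = append(b, 0x80|byte(v))
        v >>= 7
    }
    return append(b, byte(v))
}

// decodeVarint is the inverse: it reassembles the 7-bit groups and reports
// how many bytes were consumed.
func decodeVarint(b []byte) (v uint64, n int) {
    for shift := uint(0); n < len(b); shift += 7 {
        c := b[n]
        n++
        v |= uint64(c&0x7f) << shift
        if c < 0x80 {
            return v, n
        }
    }
    return 0, 0 // truncated input
}

func main() {
    for _, v := range []uint64{0, 1, 127, 128, 300, 1<<32 + 5} {
        enc := encodeVarint(v)
        dec, n := decodeVarint(enc)
        fmt.Printf("%d -> % x -> %d (%d bytes)\n", v, enc, dec, n)
    }
    // A uint64 never needs more than 10 of these 7-bit groups, which is why
    // traceBytesPerNumber is 10.
}
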
// varintReserve reserves enough space in buf to hold any varint.
//
// Space reserved this way can be filled in with the varintAt method.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) varintReserve() int {
    p := buf.pos
    buf.pos += traceBytesPerNumber
    return p
}

// stringData appends s's data directly to buf.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) stringData(s string) {
    buf.pos += copy(buf.arr[buf.pos:], s)
}

// available reports whether there is room for at least size more bytes in buf.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) available(size int) bool {
    return len(buf.arr)-buf.pos >= size
}

// varintAt writes varint v at byte position pos in buf. This always
// consumes traceBytesPerNumber bytes. This is intended for when the caller
// needs to reserve space for a varint but can't populate it until later.
// Use varintReserve to reserve this space.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) varintAt(pos int, v uint64) {
    for i := 0; i < traceBytesPerNumber; i++ {
        if i < traceBytesPerNumber-1 {
            buf.arr[pos] = 0x80 | byte(v)
        } else {
            buf.arr[pos] = byte(v)
        }
        v >>= 7
        pos++
    }
    if v != 0 {
        throw("v could not fit in traceBytesPerNumber")
    }
}

// traceBufFlush flushes a trace buffer.
//
// Must run on the system stack because trace.lock must be held.
//
//go:systemstack
func traceBufFlush(buf *traceBuf, gen uintptr) {
    assertLockHeld(&trace.lock)

    // Write out the non-header length of the batch in the header.
    //
    // Note: the length of the header is not included to make it easier
    // to calculate this value when deserializing and reserializing the
    // trace. Varints can have additional padding of zero bits that is
    // quite difficult to preserve, and if we include the header we
    // force serializers to do more work. Nothing else actually needs
    // padding.
    buf.varintAt(buf.lenPos, uint64(buf.pos-(buf.lenPos+traceBytesPerNumber)))
    trace.full[gen%2].push(buf)

    // Notify the scheduler that there's work available and that the trace
    // reader should be scheduled.
    if !trace.workAvailable.Load() {
        trace.workAvailable.Store(true)
    }
}
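
varintReserve and varintAt together implement a reserve-then-backfill pattern: refill reserves a fixed traceBytesPerNumber-wide slot for the batch length, and traceBufFlush fills it in once the batch's final size is known. Because the backfilled varint is padded to a fixed width (continuation bits on every byte but the last, zero-valued groups after the value), the slot is always exactly 10 bytes and nothing after it ever has to move. Below is a minimal standalone sketch of the same idea; the 10-byte width matches traceBytesPerNumber, but the header bytes, payload, and helper name are illustrative only.

package main

import "fmt"

const bytesPerNumber = 10 // same role as traceBytesPerNumber

// putPaddedVarint writes v at pos using exactly bytesPerNumber bytes,
// like traceBuf.varintAt: every byte but the last keeps its continuation bit.
func putPaddedVarint(b []byte, pos int, v uint64) {
    for i := 0; i < bytesPerNumber; i++ {
        if i < bytesPerNumber-1 {
            b[pos+i] = 0x80 | byte(v)
        } else {
            b[pos+i] = byte(v)
        }
        v >>= 7
    }
}

func main() {
    var batch []byte

    // Write a stand-in batch header, then reserve a fixed-width slot for the
    // length, mirroring what traceWriter.refill does with varintReserve.
    batch = append(batch, 0x01, 0x05, 0x02, 0x10) // fake: batch type, gen, M id, timestamp
    lenPos := len(batch)
    batch = append(batch, make([]byte, bytesPerNumber)...) // reserved slot

    // Append the batch payload (stand-in event bytes).
    batch = append(batch, 0x16, 0x25, 0x03, 0x2a)

    // Backfill the reserved slot with the payload length, excluding the
    // header and the slot itself, mirroring traceBufFlush.
    payloadLen := len(batch) - (lenPos + bytesPerNumber)
    putPaddedVarint(batch, lenPos, uint64(payloadLen))

    fmt.Printf("payload length = %d\n% x\n", payloadLen, batch)
}

A reader that decodes the slot as an ordinary varint still recovers the correct length; it simply consumes the zero-padding bytes, which is exactly the property the comment in traceBufFlush relies on when it says only this field needs padding.
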