package runtime
import (
	"internal/godebugs"
	"unsafe"
)
var (
	// metrics is a map of runtime/metrics keys to data used by the runtime
	// to sample each metric's value. metricsInit indicates it has been
	// initialized.
	//
	// These fields are protected by metricsSema which should be
	// locked/unlocked with metricsLock() / metricsUnlock().
	metricsSema uint32 = 1
	metricsInit bool
	metrics     map[string]metricData

	// Buckets (bucket boundaries, in float64 seconds/bytes) shared by the
	// histogram-valued metrics below; populated lazily on first init.
	sizeClassBuckets []float64
	timeHistBuckets  []float64
)
// metricData describes how the runtime samples one metric: which stat
// aggregates it depends on, and how to compute its value from them.
type metricData struct {
	// deps is the set of runtime statistics that this metric
	// depends on. Before compute is called, the statAggregate
	// which will be passed must ensure() these dependencies.
	deps statDepSet

	// compute is a function that populates a metricValue
	// given a populated statAggregate structure.
	compute func(in *statAggregate, out *metricValue)
}
func () {
semacquire1(&metricsSema, true, 0, 0, waitReasonSemacquire)
if raceenabled {
raceacquire(unsafe.Pointer(&metricsSema))
}
}
func () {
if raceenabled {
racerelease(unsafe.Pointer(&metricsSema))
}
semrelease(&metricsSema)
}
func () {
if metricsInit {
return
}
sizeClassBuckets = make([]float64, _NumSizeClasses, _NumSizeClasses+1)
sizeClassBuckets[0] = 1
for := 1; < _NumSizeClasses; ++ {
sizeClassBuckets[] = float64(class_to_size[] + 1)
}
sizeClassBuckets = append(sizeClassBuckets, float64Inf())
timeHistBuckets = timeHistogramMetricsBuckets()
metrics = map[string]metricData{
"/cgo/go-to-c-calls:calls": {
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = uint64(NumCgoCall())
},
},
"/cpu/classes/gc/mark/assist:cpu-seconds": {
deps: makeStatDepSet(cpuStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindFloat64
.scalar = float64bits(nsToSec(.cpuStats.GCAssistTime))
},
},
"/cpu/classes/gc/mark/dedicated:cpu-seconds": {
deps: makeStatDepSet(cpuStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindFloat64
.scalar = float64bits(nsToSec(.cpuStats.GCDedicatedTime))
},
},
"/cpu/classes/gc/mark/idle:cpu-seconds": {
deps: makeStatDepSet(cpuStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindFloat64
.scalar = float64bits(nsToSec(.cpuStats.GCIdleTime))
},
},
"/cpu/classes/gc/pause:cpu-seconds": {
deps: makeStatDepSet(cpuStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindFloat64
.scalar = float64bits(nsToSec(.cpuStats.GCPauseTime))
},
},
"/cpu/classes/gc/total:cpu-seconds": {
deps: makeStatDepSet(cpuStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindFloat64
.scalar = float64bits(nsToSec(.cpuStats.GCTotalTime))
},
},
"/cpu/classes/idle:cpu-seconds": {
deps: makeStatDepSet(cpuStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindFloat64
.scalar = float64bits(nsToSec(.cpuStats.IdleTime))
},
},
"/cpu/classes/scavenge/assist:cpu-seconds": {
deps: makeStatDepSet(cpuStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindFloat64
.scalar = float64bits(nsToSec(.cpuStats.ScavengeAssistTime))
},
},
"/cpu/classes/scavenge/background:cpu-seconds": {
deps: makeStatDepSet(cpuStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindFloat64
.scalar = float64bits(nsToSec(.cpuStats.ScavengeBgTime))
},
},
"/cpu/classes/scavenge/total:cpu-seconds": {
deps: makeStatDepSet(cpuStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindFloat64
.scalar = float64bits(nsToSec(.cpuStats.ScavengeTotalTime))
},
},
"/cpu/classes/total:cpu-seconds": {
deps: makeStatDepSet(cpuStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindFloat64
.scalar = float64bits(nsToSec(.cpuStats.TotalTime))
},
},
"/cpu/classes/user:cpu-seconds": {
deps: makeStatDepSet(cpuStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindFloat64
.scalar = float64bits(nsToSec(.cpuStats.UserTime))
},
},
"/gc/cycles/automatic:gc-cycles": {
deps: makeStatDepSet(sysStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .sysStats.gcCyclesDone - .sysStats.gcCyclesForced
},
},
"/gc/cycles/forced:gc-cycles": {
deps: makeStatDepSet(sysStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .sysStats.gcCyclesForced
},
},
"/gc/cycles/total:gc-cycles": {
deps: makeStatDepSet(sysStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .sysStats.gcCyclesDone
},
},
"/gc/scan/globals:bytes": {
deps: makeStatDepSet(gcStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .gcStats.globalsScan
},
},
"/gc/scan/heap:bytes": {
deps: makeStatDepSet(gcStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .gcStats.heapScan
},
},
"/gc/scan/stack:bytes": {
deps: makeStatDepSet(gcStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .gcStats.stackScan
},
},
"/gc/scan/total:bytes": {
deps: makeStatDepSet(gcStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .gcStats.totalScan
},
},
"/gc/heap/allocs-by-size:bytes": {
deps: makeStatDepSet(heapStatsDep),
compute: func( *statAggregate, *metricValue) {
:= .float64HistOrInit(sizeClassBuckets)
.counts[len(.counts)-1] = .heapStats.largeAllocCount
for , := range .heapStats.smallAllocCount[1:] {
.counts[] =
}
},
},
"/gc/heap/allocs:bytes": {
deps: makeStatDepSet(heapStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .heapStats.totalAllocated
},
},
"/gc/heap/allocs:objects": {
deps: makeStatDepSet(heapStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .heapStats.totalAllocs
},
},
"/gc/heap/frees-by-size:bytes": {
deps: makeStatDepSet(heapStatsDep),
compute: func( *statAggregate, *metricValue) {
:= .float64HistOrInit(sizeClassBuckets)
.counts[len(.counts)-1] = .heapStats.largeFreeCount
for , := range .heapStats.smallFreeCount[1:] {
.counts[] =
}
},
},
"/gc/heap/frees:bytes": {
deps: makeStatDepSet(heapStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .heapStats.totalFreed
},
},
"/gc/heap/frees:objects": {
deps: makeStatDepSet(heapStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .heapStats.totalFrees
},
},
"/gc/heap/goal:bytes": {
deps: makeStatDepSet(sysStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .sysStats.heapGoal
},
},
"/gc/gomemlimit:bytes": {
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = uint64(gcController.memoryLimit.Load())
},
},
"/gc/gogc:percent": {
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = uint64(gcController.gcPercent.Load())
},
},
"/gc/heap/live:bytes": {
deps: makeStatDepSet(heapStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = gcController.heapMarked
},
},
"/gc/heap/objects:objects": {
deps: makeStatDepSet(heapStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .heapStats.numObjects
},
},
"/gc/heap/tiny/allocs:objects": {
deps: makeStatDepSet(heapStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .heapStats.tinyAllocCount
},
},
"/gc/limiter/last-enabled:gc-cycle": {
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = uint64(gcCPULimiter.lastEnabledCycle.Load())
},
},
"/gc/pauses:seconds": {
compute: func( *statAggregate, *metricValue) {
sched.stwTotalTimeGC.write()
},
},
"/gc/stack/starting-size:bytes": {
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = uint64(startingStackSize)
},
},
"/memory/classes/heap/free:bytes": {
deps: makeStatDepSet(heapStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = uint64(.heapStats.committed - .heapStats.inHeap -
.heapStats.inStacks - .heapStats.inWorkBufs -
.heapStats.inPtrScalarBits)
},
},
"/memory/classes/heap/objects:bytes": {
deps: makeStatDepSet(heapStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .heapStats.inObjects
},
},
"/memory/classes/heap/released:bytes": {
deps: makeStatDepSet(heapStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = uint64(.heapStats.released)
},
},
"/memory/classes/heap/stacks:bytes": {
deps: makeStatDepSet(heapStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = uint64(.heapStats.inStacks)
},
},
"/memory/classes/heap/unused:bytes": {
deps: makeStatDepSet(heapStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = uint64(.heapStats.inHeap) - .heapStats.inObjects
},
},
"/memory/classes/metadata/mcache/free:bytes": {
deps: makeStatDepSet(sysStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .sysStats.mCacheSys - .sysStats.mCacheInUse
},
},
"/memory/classes/metadata/mcache/inuse:bytes": {
deps: makeStatDepSet(sysStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .sysStats.mCacheInUse
},
},
"/memory/classes/metadata/mspan/free:bytes": {
deps: makeStatDepSet(sysStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .sysStats.mSpanSys - .sysStats.mSpanInUse
},
},
"/memory/classes/metadata/mspan/inuse:bytes": {
deps: makeStatDepSet(sysStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .sysStats.mSpanInUse
},
},
"/memory/classes/metadata/other:bytes": {
deps: makeStatDepSet(heapStatsDep, sysStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = uint64(.heapStats.inWorkBufs+.heapStats.inPtrScalarBits) + .sysStats.gcMiscSys
},
},
"/memory/classes/os-stacks:bytes": {
deps: makeStatDepSet(sysStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .sysStats.stacksSys
},
},
"/memory/classes/other:bytes": {
deps: makeStatDepSet(sysStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .sysStats.otherSys
},
},
"/memory/classes/profiling/buckets:bytes": {
deps: makeStatDepSet(sysStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = .sysStats.buckHashSys
},
},
"/memory/classes/total:bytes": {
deps: makeStatDepSet(heapStatsDep, sysStatsDep),
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = uint64(.heapStats.committed+.heapStats.released) +
.sysStats.stacksSys + .sysStats.mSpanSys +
.sysStats.mCacheSys + .sysStats.buckHashSys +
.sysStats.gcMiscSys + .sysStats.otherSys
},
},
"/sched/gomaxprocs:threads": {
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = uint64(gomaxprocs)
},
},
"/sched/goroutines:goroutines": {
compute: func( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = uint64(gcount())
},
},
"/sched/latencies:seconds": {
compute: func( *statAggregate, *metricValue) {
sched.timeToRun.write()
},
},
"/sched/pauses/stopping/gc:seconds": {
compute: func( *statAggregate, *metricValue) {
sched.stwStoppingTimeGC.write()
},
},
"/sched/pauses/stopping/other:seconds": {
compute: func( *statAggregate, *metricValue) {
sched.stwStoppingTimeOther.write()
},
},
"/sched/pauses/total/gc:seconds": {
compute: func( *statAggregate, *metricValue) {
sched.stwTotalTimeGC.write()
},
},
"/sched/pauses/total/other:seconds": {
compute: func( *statAggregate, *metricValue) {
sched.stwTotalTimeOther.write()
},
},
"/sync/mutex/wait/total:seconds": {
compute: func( *statAggregate, *metricValue) {
.kind = metricKindFloat64
.scalar = float64bits(nsToSec(totalMutexWaitTimeNanos()))
},
},
}
for , := range godebugs.All {
if !.Opaque {
metrics["/godebug/non-default-behavior/"+.Name+":events"] = metricData{compute: compute0}
}
}
metricsInit = true
}
func ( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = 0
}
type metricReader func() uint64
func ( metricReader) ( *statAggregate, *metricValue) {
.kind = metricKindUint64
.scalar = ()
}
func ( string, func() uint64) {
metricsLock()
initMetrics()
, := metrics[]
if ! {
throw("runtime: unexpected metric registration for " + )
}
.compute = metricReader().compute
metrics[] =
metricsUnlock()
}
// statDep is a dependency on a group of statistics
// that a metric might have.
type statDep uint

const (
	heapStatsDep statDep = iota // corresponds to heapStatsAggregate
	sysStatsDep                 // corresponds to sysStatsAggregate
	cpuStatsDep                 // corresponds to cpuStatsAggregate
	gcStatsDep                  // corresponds to gcStatsAggregate
	numStatsDeps
)
type statDepSet [1]uint64
func ( ...statDep) statDepSet {
var statDepSet
for , := range {
[/64] |= 1 << ( % 64)
}
return
}
func ( statDepSet) ( statDepSet) statDepSet {
var statDepSet
for := range {
[] = [] &^ []
}
return
}
func ( statDepSet) ( statDepSet) statDepSet {
var statDepSet
for := range {
[] = [] | []
}
return
}
func ( *statDepSet) () bool {
for , := range {
if != 0 {
return false
}
}
return true
}
func ( *statDepSet) ( statDep) bool {
return [/64]&(1<<(%64)) != 0
}
// heapStatsAggregate represents memory stats obtained from the
// runtime. This set of stats is grouped together because they
// depend on each other in some way to make sense of the runtime's
// current heap memory use. They're also sharded across Ps, so it
// makes sense to grab them all at once.
type heapStatsAggregate struct {
	heapStatsDelta

	// Derived from values in heapStatsDelta.

	// inObjects is the bytes of memory occupied by objects,
	inObjects uint64

	// numObjects is the number of live objects in the heap.
	numObjects uint64

	// totalAllocated is the total bytes of heap objects allocated
	// over the lifetime of the program.
	totalAllocated uint64

	// totalFreed is the total bytes of heap objects freed
	// over the lifetime of the program.
	totalFreed uint64

	// totalAllocs is the number of heap objects allocated over
	// the lifetime of the program.
	totalAllocs uint64

	// totalFrees is the number of heap objects freed over
	// the lifetime of the program.
	totalFrees uint64
}
func ( *heapStatsAggregate) () {
memstats.heapStats.read(&.heapStatsDelta)
.totalAllocs = .largeAllocCount
.totalFrees = .largeFreeCount
.totalAllocated = .largeAlloc
.totalFreed = .largeFree
for := range .smallAllocCount {
:= .smallAllocCount[]
:= .smallFreeCount[]
.totalAllocs +=
.totalFrees +=
.totalAllocated += * uint64(class_to_size[])
.totalFreed += * uint64(class_to_size[])
}
.inObjects = .totalAllocated - .totalFreed
.numObjects = .totalAllocs - .totalFrees
}
// sysStatsAggregate represents system memory stats obtained
// from the runtime. This set of stats is grouped together because
// they're all relatively cheap to acquire and generally independent
// of one another and other runtime memory stats. The fact that they
// may be acquired at different times also matters less.
type sysStatsAggregate struct {
	stacksSys      uint64
	mSpanSys       uint64
	mSpanInUse     uint64
	mCacheSys      uint64
	mCacheInUse    uint64
	buckHashSys    uint64
	gcMiscSys      uint64
	otherSys       uint64
	heapGoal       uint64
	gcCyclesDone   uint64
	gcCyclesForced uint64
}
func ( *sysStatsAggregate) () {
.stacksSys = memstats.stacks_sys.load()
.buckHashSys = memstats.buckhash_sys.load()
.gcMiscSys = memstats.gcMiscSys.load()
.otherSys = memstats.other_sys.load()
.heapGoal = gcController.heapGoal()
.gcCyclesDone = uint64(memstats.numgc)
.gcCyclesForced = uint64(memstats.numforcedgc)
systemstack(func() {
lock(&mheap_.lock)
.mSpanSys = memstats.mspan_sys.load()
.mSpanInUse = uint64(mheap_.spanalloc.inuse)
.mCacheSys = memstats.mcache_sys.load()
.mCacheInUse = uint64(mheap_.cachealloc.inuse)
unlock(&mheap_.lock)
})
}
// cpuStatsAggregate represents CPU stats obtained from the runtime
// acquired together to avoid skew and inconsistencies.
type cpuStatsAggregate struct {
	cpuStats
}
func ( *cpuStatsAggregate) () {
.cpuStats = work.cpuStats
}
// gcStatsAggregate represents various GC stats obtained from the runtime
// acquired together to avoid skew and inconsistencies.
type gcStatsAggregate struct {
	heapScan    uint64
	stackScan   uint64
	globalsScan uint64
	// totalScan is the sum of the three scan quantities above.
	totalScan uint64
}
func ( *gcStatsAggregate) () {
.heapScan = gcController.heapScan.Load()
.stackScan = gcController.lastStackScan.Load()
.globalsScan = gcController.globalsScan.Load()
.totalScan = .heapScan + .stackScan + .globalsScan
}
// nsToSec takes a duration in nanoseconds and converts it to seconds as
// a float64.
func nsToSec(ns int64) float64 {
	return float64(ns) / 1e9
}
// statAggregate is the main driver of the metrics implementation.
//
// It contains multiple aggregates of runtime statistics, as well
// as a set of these aggregates that it has populated. The aggregates
// are populated lazily by its ensure method.
type statAggregate struct {
	ensured   statDepSet
	heapStats heapStatsAggregate
	sysStats  sysStatsAggregate
	cpuStats  cpuStatsAggregate
	gcStats   gcStatsAggregate
}
func ( *statAggregate) ( *statDepSet) {
:= .difference(.ensured)
if .empty() {
return
}
for := statDep(0); < numStatsDeps; ++ {
if !.has() {
continue
}
switch {
case heapStatsDep:
.heapStats.compute()
case sysStatsDep:
.sysStats.compute()
case cpuStatsDep:
.cpuStats.compute()
case gcStatsDep:
.gcStats.compute()
}
}
.ensured = .ensured.union()
}
// metricKind is a runtime copy of runtime/metrics.ValueKind and
// must be kept structurally identical to that type.
type metricKind int

const (
	// These values must be kept identical to their corresponding Kind* values
	// in the runtime/metrics package.
	metricKindBad metricKind = iota
	metricKindUint64
	metricKindFloat64
	metricKindFloat64Histogram
)
// metricSample is a runtime copy of runtime/metrics.Sample and
// must be kept structurally identical to that type.
type metricSample struct {
	name  string
	value metricValue
}
// metricValue is a runtime copy of runtime/metrics.Value and
// must be kept structurally identical to that type.
type metricValue struct {
	kind    metricKind
	scalar  uint64         // contains scalar values for scalar Kinds.
	pointer unsafe.Pointer // contains non-scalar values.
}
func ( *metricValue) ( []float64) *metricFloat64Histogram {
var *metricFloat64Histogram
if .kind == metricKindFloat64Histogram && .pointer != nil {
= (*metricFloat64Histogram)(.pointer)
} else {
.kind = metricKindFloat64Histogram
= new(metricFloat64Histogram)
.pointer = unsafe.Pointer()
}
.buckets =
if len(.counts) != len(.buckets)-1 {
.counts = make([]uint64, len()-1)
}
return
}
// metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram
// and must be kept structurally identical to that type.
type metricFloat64Histogram struct {
	counts  []uint64
	buckets []float64
}
var agg statAggregate
// metricName pairs a metric's key with its value kind.
type metricName struct {
	name string
	kind metricKind
}
func () []string {
metricsLock()
initMetrics()
:= len(metrics)
metricsUnlock()
:= make([]string, 0, )
metricsLock()
for := range metrics {
= append(, )
}
metricsUnlock()
return
}
func ( unsafe.Pointer, int, int) {
metricsLock()
initMetrics()
readMetricsLocked(, , )
metricsUnlock()
}
func ( unsafe.Pointer, int, int) {
:= slice{, , }
:= *(*[]metricSample)(unsafe.Pointer(&))
agg = statAggregate{}
for := range {
:= &[]
, := metrics[.name]
if ! {
.value.kind = metricKindBad
continue
}
agg.ensure(&.deps)
.compute(&agg, &.value)
}
}