internal/runtime/atomic.Int64.Load (method)
62 uses
internal/runtime/atomic (current package)
types.go#L73: func (i *Int64) Load() int64 {
runtime
debug.go#L168: total := sched.totalMutexWaitTime.Load()
debug.go#L170: total += sched.totalRuntimeLockWaitTime.Load()
debug.go#L172: total += mp.mLockProfile.waitTime.Load()
metrics.go#L283: out.scalar = uint64(gcController.memoryLimit.Load())
mgc.go#L1273: gcController.assistTime.Load(),
mgc.go#L1274: gcController.dedicatedMarkTime.Load() + gcController.fractionalMarkTime.Load(),
mgc.go#L1275: gcController.idleMarkTime.Load(),
mgclimit.go#L130: if lastUpdate := l.lastUpdate.Load(); now >= lastUpdate {
mgclimit.go#L145: return now-l.lastUpdate.Load() > gcCPULimiterUpdatePeriod
mgclimit.go#L180: lastUpdate := l.lastUpdate.Load()
mgclimit.go#L189: assistTime := l.assistTimePool.Load()
mgclimit.go#L195: idleTime := l.idleTimePool.Load()
mgcmark.go#L520: bgScanCredit := gcController.bgScanCredit.Load()
mgcmark.go#L765: if gcController.bgScanCredit.Load() > 0 {
mgcpacer.go#L501: work := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()
mgcpacer.go#L612: utilization += float64(c.assistTime.Load()) / float64(assistDuration*int64(procs))
mgcpacer.go#L628: idleUtilization = float64(c.idleMarkTime.Load()) / float64(assistDuration*int64(procs))
mgcpacer.go#L658: scanWork := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()
mgcpacer.go#L680: print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.lastStackScan.Load()+c.globalsScan.Load(), " B exp.) ")
mgcpacer.go#L698: needDedicated := c.dedicatedMarkWorkersNeeded.Load() > 0
mgcpacer.go#L778: if c.dedicatedMarkWorkersNeeded.Load() <= 0 && c.fractionalUtilizationGoal == 0 {
mgcpacer.go#L808: v := val.Load()
mgcpacer.go#L861: c.heapScan.Store(uint64(c.heapScanWork.Load()))
mgcpacer.go#L862: c.lastHeapScan = uint64(c.heapScanWork.Load())
mgcpacer.go#L863: c.lastStackScan.Store(uint64(c.stackScanWork.Load()))
mgcpacer.go#L1060: memoryLimit := uint64(c.memoryLimit.Load())
mgcpacer.go#L1331: out := c.memoryLimit.Load()
mgcpacer.go#L1479: gcPaceScavenger(gcController.memoryLimit.Load(), heapGoal, gcController.lastHeapGoal)
mheap.go#L1330: if limit := gcController.memoryLimit.Load(); !gcCPULimiter.limiting() {
mranges.go#L181: old := b.a.Load()
mranges.go#L196: old := b.a.Load()
mranges.go#L223: v := b.a.Load()
mstats.go#L949: markAssistCpu = gcController.assistTime.Load()
mstats.go#L950: markDedicatedCpu = gcController.dedicatedMarkTime.Load()
mstats.go#L951: markFractionalCpu = gcController.fractionalMarkTime.Load()
mstats.go#L952: markIdleCpu = gcController.idleMarkTime.Load()
mstats.go#L958: scavAssistCpu := scavenge.assistTime.Load()
mstats.go#L959: scavBgCpu := scavenge.backgroundTime.Load()
mstats.go#L974: s.IdleTime += sched.idleTime.Load()
proc.go#L2062: sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
proc.go#L3187: if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
proc.go#L3483: if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
proc.go#L3779: pollerPollUntil := sched.pollUntil.Load()
proc.go#L3800: if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
proc.go#L3991: if sched.lastpoll.Load() == 0 {
proc.go#L3996: pollerPollUntil := sched.pollUntil.Load()
proc.go#L6310: lastpoll := sched.lastpoll.Load()
runtime.go#L79: r := ticks.val.Load()
runtime.go#L87: r = ticks.val.Load()
time.go#L611: if min := t.ts.minWhenModified.Load(); min == 0 || when < min {
time.go#L867: first := ts.minWhenModified.Load()
time.go#L988: nextWhen := ts.minWhenModified.Load()
time.go#L989: when := ts.minWhenHeap.Load()
time.go#L1306: old := ts.minWhenModified.Load()
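
For context, a minimal usage sketch of the Load/Add pattern these call sites rely on. This is an illustrative example, not runtime code; internal/runtime/atomic.Int64 mirrors the exported sync/atomic.Int64 API, so the sketch uses the exported type.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var total atomic.Int64 // zero value is ready to use

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			total.Add(25) // concurrent writers update the counter atomically
		}()
	}
	wg.Wait()

	// Load performs an atomic read of the current value, the same operation
	// the runtime uses to read counters such as sched.totalMutexWaitTime.
	fmt.Println(total.Load()) // 100
}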