internal/runtime/atomic.Uint32.Load (method)
88 uses
internal/runtime/atomic (current package)
types.go#L193: func (u *Uint32) Load() uint32 {
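
Load performs an atomic read of the wrapped uint32; the call sites listed below use it for lock-free flag and counter checks. A minimal sketch of the call shape, written against the exported sync/atomic.Uint32 (an assumption for illustration only, since internal/runtime/atomic cannot be imported outside the runtime; the exported type's Load mirrors the signature above):

package main

import (
	"fmt"
	"sync/atomic"
)

var ready atomic.Uint32 // zero value is usable; Load reports 0 until a Store or CompareAndSwap

func main() {
	fmt.Println(ready.Load()) // 0
	ready.Store(1)            // another goroutine could do this concurrently
	fmt.Println(ready.Load()) // 1
}
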
runtime
cgocall.go#L385: if gp.m.needextram || extraMWaiters.Load() > 0 {
debuglog.go#L87: if l1.owned.Load() == 0 && l1.owned.CompareAndSwap(0, 1) {
mcache.go#L330: flushGen := c.flushGen.Load()
metrics.go#L315: out.scalar = uint64(gcCPULimiter.lastEnabledCycle.Load())
mfinal.go#L165: if fingStatus.Load() == fingUninitialized && fingStatus.CompareAndSwap(fingUninitialized, fingCreated) {
mgc.go#L494: n := work.cycles.Load()
mgc.go#L509: for work.cycles.Load() == n+1 && sweepone() != ^uintptr(0) {
mgc.go#L524: for work.cycles.Load() == n+1 && !isSweepDone() {
mgc.go#L532: cycle := work.cycles.Load()
mgc.go#L545: nMarks := work.cycles.Load()
mgc.go#L603: if !memstats.enablegc || panicking.Load() != 0 || gcphase != _GCoff {
mgc.go#L618: return int32(t.n-work.cycles.Load()) > 0
mgc.go#L702: if fg := p.mcache.flushGen.Load(); fg != mheap_.sweepgen {
mgcpacer.go#L1282: gcWaitOnMark(work.cycles.Load())
mgcsweep.go#L150: state := a.state.Load()
mgcsweep.go#L167: state := a.state.Load()
mgcsweep.go#L192: state := a.state.Load()
mgcsweep.go#L204: return a.state.Load() &^ sweepDrainedMask
mgcsweep.go#L211: return a.state.Load() == sweepDrainedMask
mprof.go#L191: v := c.value.Load()
mprof.go#L200: prev := c.value.Load()
mprof.go#L217: prev := c.value.Load()
mprof.go#L1358: return goroutineProfileState((*atomic.Uint32)(p).Load())
mprof.go#L1392: if fingStatus.Load()&fingRunningFinalizer != 0 {
mspanset.go#L247: if block.popped.Load() == 0 {
mspanset.go#L253: if block.popped.Load() == spanSetBlockEntries {
mstats.go#L787: gen := m.gen.Load() % 3
mstats.go#L858: currGen := m.gen.Load()
mstats.go#L881: for p.statsSeq.Load()%2 != 0 {
netpoll.go#L143: return pollInfo(pd.atomicInfo.Load())
netpoll.go#L167: x := pd.atomicInfo.Load()
netpoll.go#L169: x = pd.atomicInfo.Load()
netpoll.go#L178: x := pd.atomicInfo.Load()
netpoll.go#L184: x = pd.atomicInfo.Load()
netpoll.go#L219: if netpollInited.Load() == 0 {
netpoll.go#L223: if netpollInited.Load() == 0 {
netpoll.go#L232: return netpollInited.Load() != 0
netpoll.go#L678: return netpollWaiters.Load() > 0
preempt.go#L195: if gp.preemptStop && gp.preempt && gp.stackguard0 == stackPreempt && asyncM == gp.m && asyncM.preemptGen.Load() == asyncGen {
preempt.go#L211: asyncGen2 := asyncM2.preemptGen.Load()
print.go#L42: if panicking.Load() == 0 {
proc.go#L293: if runningPanicDefers.Load() != 0 {
proc.go#L296: if runningPanicDefers.Load() == 0 {
proc.go#L302: if panicking.Load() != 0 {
proc.go#L918: count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
proc.go#L1128: return gp.atomicstatus.Load()
proc.go#L1212: if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
proc.go#L1223: for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
proc.go#L1998: if mp.signalPending.Load() != 0 {
proc.go#L2223: wait := freem.freeWait.Load()
proc.go#L2418: } else if extraMLength.Load() == 0 {
proc.go#L3351: if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
proc.go#L3485: if !mp.spinning && sched.needspinning.Load() == 1 {
proc.go#L5524: if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
proc.go#L6003: if panicking.Load() > 0 {
proc.go#L6012: if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
proc.go#L6229: if scavenger.sysmonWake.Load() != 0 {
proc.go#L6407: print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
proc.go#L6667: if pp.timers.len.Load() == 0 {
profbuf.go#L395: if b.eof.Load() > 0 {
profbuf.go#L474: if b.eof.Load() > 0 {
runtime.go#L237: gp == nil && panicking.Load() > 0 {
runtime.go#L259: if panicking.Load() > 0 {
runtime.go#L286: if panicking.Load() > 0 {
sema.go#L210: if root.nwait.Load() == 0 {
sema.go#L216: if root.nwait.Load() == 0 {
sema.go#L610: if l.wait.Load() == atomic.Load(&l.notify) {
sema.go#L625: atomic.Store(&l.notify, l.wait.Load())
sema.go#L647: if l.wait.Load() == atomic.Load(&l.notify) {
sema.go#L655: if t == l.wait.Load() {
signal_unix.go#L789: if crashing.Load() < mcount()-int32(extraMLength.Load()) {
signal_unix.go#L811: for timeout > 0 && (crashing.Load() < mcount()-int32(extraMLength.Load())) {
sigqueue.go#L100: switch sig.state.Load() {
sigqueue.go#L143: switch sig.state.Load() {
sigqueue.go#L185: for sig.delivering.Load() != 0 {
sigqueue.go#L192: for sig.state.Load() != sigReceiving {
symtab.go#L1032: if strict && panicking.Load() == 0 {
symtab.go#L1087: if panicking.Load() != 0 || !strict {
time.go#L429: if netpollInited.Load() == 0 {
time.go#L973: force := ts == &getg().m.p.ptr().timers && int(zombies) > int(ts.len.Load())/4
time.go#L999: force = ts == &getg().m.p.ptr().timers && int(ts.zombies.Load()) > int(ts.len.Load())/4
time.go#L1216: if n := int(ts.len.Load()); len(ts.heap) != n {
traceback.go#L1356: return fingStatus.Load()&fingRunningFinalizer == 0
traceback.go#L1631: if panicking.Load() > 0 || getg().m.curg != getg() {
traceback.go#L1651: if panicking.Load() > 0 || getg().m.curg != getg() {
tracestatus.go#L223: return r.statusTraced[gen%3].Load() != 0
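
Several of the call sites above (e.g. debuglog.go#L87 and mfinal.go#L165) pair Load with CompareAndSwap: a cheap Load filters out the common already-set case before attempting the read-modify-write. A hedged sketch of that guard pattern, again using the exported sync/atomic.Uint32 rather than the internal package, with tryAcquire as a hypothetical helper name:

package main

import (
	"fmt"
	"sync/atomic"
)

var owned atomic.Uint32

// tryAcquire succeeds for exactly one caller: the Load is a fast,
// write-free check, and only a successful CompareAndSwap claims ownership.
func tryAcquire() bool {
	return owned.Load() == 0 && owned.CompareAndSwap(0, 1)
}

func main() {
	fmt.Println(tryAcquire()) // true: first caller claims ownership
	fmt.Println(tryAcquire()) // false: already owned
}
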