internal/runtime/atomic.Int32.Load (method)

54 uses

	internal/runtime/atomic (current package)
		types.go#L20: func (i *Int32) Load() int32 {
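
The method above is the runtime-internal counterpart of the exported sync/atomic.Int32.Load. Since internal/runtime/atomic cannot be imported outside the runtime, the minimal sketch below uses the exported mirror to show the same Load semantics: an atomic read that is safe for concurrent use without a lock. Treating the two APIs as equivalent here is an illustrative assumption, not runtime code.

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	func main() {
		var n atomic.Int32

		n.Store(42)
		v := n.Load() // atomic read; no lock needed for concurrent readers
		fmt.Println(v) // 42
	}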

	runtime
		chan.go#L819: 	async := debug.asynctimerchan.Load() != 0
		chan.go#L837: 		async := debug.asynctimerchan.Load() != 0
		cpuprof.go#L113: 	if prof.hz.Load() != 0 { // implies cpuprof.log != nil
		metrics.go#L288: 				out.scalar = uint64(gcController.gcPercent.Load())
		mgc.go#L611: 		if gcController.gcPercent.Load() < 0 {
		mgcpacer.go#L491: 	gcPercent := c.gcPercent.Load()
		mgcpacer.go#L1214: 	if gcPercent := c.gcPercent.Load(); gcPercent >= 0 {
		mgcpacer.go#L1259: 	out := c.gcPercent.Load()
		mprof.go#L805: 	if debug.runtimeContentionStacks.Load() == 0 {
		panic.go#L744: 		if debug.panicnil.Load() != 1 {
		proc.go#L384: 	if !gp.preempt && sched.npidle.Load() > 0 {
		proc.go#L3072: 	if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) { // TODO: fast atomic
		proc.go#L3102: 	if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
		proc.go#L3135: 	if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
		proc.go#L3402: 	if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
		proc.go#L3970: 	npidle := int(sched.npidle.Load())
		proc.go#L5005: 		for pendingPreemptSignals.Load() > 0 {
		proc.go#L5435: 	n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
		proc.go#L5473: 	if prof.hz.Load() == 0 {
		proc.go#L5565: 	if prof.hz.Load() != 0 {
		proc.go#L5611: 	if prof.hz.Load() != hz {
		proc.go#L6147: 		if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
		proc.go#L6149: 			if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
		proc.go#L6313: 			if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
		proc.go#L6407: 	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
		proc.go#L6503: 		for ; n != 0 && sched.npidle.Load() != 0; n-- {
		proc.go#L7212: 	if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
		signal_unix.go#L516: 	if prof.hz.Load() != 0 {
		signal_unix.go#L539: 	if prof.hz.Load() != 0 {
		signal_unix.go#L758: 	if crashing.Load() == 0 {
		signal_unix.go#L768: 		if crashing.Load() > 0 && gp != mp.curg && mp.curg != nil && readgstatus(mp.curg)&^_Gscan == _Grunning {
		signal_unix.go#L772: 		} else if crashing.Load() == 0 {
		signal_unix.go#L789: 		if crashing.Load() < mcount()-int32(extraMLength.Load()) {
		signal_unix.go#L810: 			maxCrashing := crashing.Load()
		signal_unix.go#L811: 			for timeout > 0 && (crashing.Load() < mcount()-int32(extraMLength.Load())) {
		signal_unix.go#L815: 				if c := crashing.Load(); c > maxCrashing {
		signal_unix.go#L823: 			c := crashing.Load()
		signal_unix.go#L827: 				c = crashing.Load()
		synctest.go#L155: 	if debug.asynctimerchan.Load() != 0 {
		time.go#L473: 	async := debug.asynctimerchan.Load() != 0
		time.go#L503: 		if t.period == 0 && t.isSending.Load() > 0 {
		time.go#L554: 	async := debug.asynctimerchan.Load() != 0
		time.go#L607: 		if oldPeriod == 0 && t.isSending.Load() > 0 {
		time.go#L969: 	zombies := ts.zombies.Load()
		time.go#L999: 		force = ts == &getg().m.p.ptr().timers && int(ts.zombies.Load()) > int(ts.len.Load())/4
		time.go#L1108: 	async := debug.asynctimerchan.Load() != 0
		trace.go#L254: 	if debug.traceallocfree.Load() != 0 {
		trace.go#L275: 	for trace.exitingSyscall.Load() != 0 {
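
Two patterns dominate the runtime uses above: reading a flag or counter that is set elsewhere (debug.asynctimerchan, prof.hz, crashing), and the load-then-CompareAndSwap fast path on sched.nmspinning in proc.go#L3072 and proc.go#L3135, where a cheap Load filters out the common case before attempting a contended CAS. Below is a hedged sketch of that second pattern, written against the exported sync/atomic.Int32; the names spinning and tryStartSpinning are illustrative, not the runtime's.

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// spinning counts spinning worker threads (an illustrative
	// stand-in for sched.nmspinning).
	var spinning atomic.Int32

	// tryStartSpinning transitions 0 -> 1 only when no thread is
	// spinning yet.
	func tryStartSpinning() bool {
		// Fast path: a plain atomic Load avoids a contended CAS when
		// another thread is already spinning.
		if spinning.Load() != 0 {
			return false
		}
		// Slow path: race to claim the slot; the CAS can still fail
		// if another thread won between the Load and here.
		return spinning.CompareAndSwap(0, 1)
	}

	func main() {
		fmt.Println(tryStartSpinning()) // true: this caller became the spinner
		fmt.Println(tryStartSpinning()) // false: a spinner already exists
	}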