func runtime/internal/atomic.Xadd
67 uses
runtime/internal/atomic (current package)
atomic_amd64.go#L51: func Xadd(ptr *uint32, delta int32) uint32
types.go#L218: return Xadd(&u.value, delta)
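
The declaration above gives Xadd's contract: atomically add delta to *ptr and return the new (post-add) value, which is what the wrapper at types.go#L218 passes through. runtime/internal/atomic cannot be imported outside the runtime, so the minimal sketch below models the same semantics with the exported sync/atomic package; the helper name xadd and the example values are illustrative assumptions, not runtime code.

package main

import (
	"fmt"
	"sync/atomic"
)

// xadd mirrors the behavior of runtime/internal/atomic.Xadd shown above:
// atomically add delta to *ptr and return the new value.
// sync/atomic.AddUint32 takes an unsigned delta; converting a negative
// int32 relies on two's-complement wraparound, matching the subtraction
// idiom the sync/atomic documentation describes.
func xadd(ptr *uint32, delta int32) uint32 {
	return atomic.AddUint32(ptr, uint32(delta))
}

func main() {
	var n uint32
	fmt.Println(xadd(&n, +1)) // 1: the new value, not the old one
	fmt.Println(xadd(&n, +2)) // 3
	fmt.Println(xadd(&n, -1)) // 2: decrement via wraparound of uint32(-1)
}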
runtime
mfinal.go#L117: atomic.Xadd(&finq.cnt, +1) // Sync with markroots
mgc.go#L818: atomic.Xadd(&gcMarkDoneFlushed, 1)
mgc.go#L1257: decnwait := atomic.Xadd(&work.nwait, -1)
mgc.go#L1309: incnwait := atomic.Xadd(&work.nwait, +1)
mgcmark.go#L545: decnwait := atomic.Xadd(&work.nwait, -1)
mgcmark.go#L573: incnwait := atomic.Xadd(&work.nwait, +1)
mgcmark.go#L1043: job := atomic.Xadd(&work.markrootNext, +1) - 1
mgcmark.go#L1158: job := atomic.Xadd(&work.markrootNext, +1) - 1
mspanset.go#L214: if atomic.Xadd(&block.popped, 1) == spanSetBlockEntries {
mstats.go#L804: seq := atomic.Xadd(&pp.statsSeq, 1)
mstats.go#L833: seq := atomic.Xadd(&pp.statsSeq, 1)
netpoll.go#L481: atomic.Xadd(&netpollWaiters, 1)
netpoll.go#L487: atomic.Xadd(&netpollWaiters, -1)
panic.go#L794: atomic.Xadd(&runningPanicDefers, 1)
panic.go#L874: atomic.Xadd(&runningPanicDefers, -1)
panic.go#L1080: atomic.Xadd(&runningPanicDefers, -1)
panic.go#L1135: atomic.Xadd(&panicking, 1)
panic.go#L1197: if atomic.Xadd(&panicking, -1) != 0 {
proc.go#L1544: atomic.Xadd(&pendingPreemptSignals, -1)
proc.go#L1864: atomic.Xadd(&sched.ngsys, -1)
proc.go#L1926: atomic.Xadd(&sched.ngsys, +1)
proc.go#L1967: atomic.Xadd(&sched.ngsys, +1)
proc.go#L2019: atomic.Xadd(&extraMWaiters, 1)
proc.go#L2275: if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
proc.go#L2466: if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
proc.go#L2596: atomic.Xadd(&sched.nmspinning, 1)
proc.go#L2700: if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
proc.go#L2715: atomic.Xadd(&sched.nmspinning, 1)
proc.go#L2724: atomic.Xadd(&sched.nmspinning, 1)
proc.go#L2794: atomic.Xadd(&sched.nmspinning, 1)
proc.go#L3029: nmspinning := atomic.Xadd(&sched.nmspinning, -1)
proc.go#L3460: atomic.Xadd(&sched.ngsys, -1)
proc.go#L4115: atomic.Xadd(&sched.ngsys, +1)
proc.go#L5623: atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
proc.go#L5641: atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
rwmutex.go#L39: if int32(atomic.Xadd(&rw.readerCount, 1)) < 0 {
rwmutex.go#L63: if r := int32(atomic.Xadd(&rw.readerCount, -1)); r < 0 {
rwmutex.go#L68: if atomic.Xadd(&rw.readerWait, -1) == 0 {
rwmutex.go#L87: r := int32(atomic.Xadd(&rw.readerCount, -rwmutexMaxReaders)) + rwmutexMaxReaders
rwmutex.go#L90: if r != 0 && atomic.Xadd(&rw.readerWait, r) != 0 {
rwmutex.go#L106: r := int32(atomic.Xadd(&rw.readerCount, rwmutexMaxReaders))
sema.go#L134: atomic.Xadd(&root.nwait, 1)
sema.go#L137: atomic.Xadd(&root.nwait, -1)
sema.go#L161: atomic.Xadd(addr, 1)
sema.go#L180: atomic.Xadd(&root.nwait, -1)
sema.go#L482: return atomic.Xadd(&l.wait, 1) - 1
signal_unix.go#L350: atomic.Xadd(&gp.m.preemptGen, 1)
signal_unix.go#L354: atomic.Xadd(&pendingPreemptSignals, -1)
signal_unix.go#L375: atomic.Xadd(&pendingPreemptSignals, 1)
signal_unix.go#L454: atomic.Xadd(&pendingPreemptSignals, -1)
sigqueue.go#L77: atomic.Xadd(&sig.delivering, 1)
sigqueue.go#L81: atomic.Xadd(&sig.delivering, -1)
sigqueue.go#L89: atomic.Xadd(&sig.delivering, -1)
sigqueue.go#L122: atomic.Xadd(&sig.delivering, -1)
time.go#L300: atomic.Xadd(&pp.numTimers, 1)
time.go#L323: atomic.Xadd(&tpp.deletedTimers, 1)
time.go#L341: atomic.Xadd(&tpp.deletedTimers, 1)
time.go#L394: atomic.Xadd(&pp.numTimers, -1)
time.go#L418: atomic.Xadd(&pp.numTimers, -1)
time.go#L466: atomic.Xadd(&t.pp.ptr().deletedTimers, -1)
time.go#L575: atomic.Xadd(&pp.deletedTimers, -1)
time.go#L684: atomic.Xadd(&pp.deletedTimers, -1)
time.go#L786: atomic.Xadd(&pp.deletedTimers, -1)
time.go#L950: atomic.Xadd(&pp.deletedTimers, -cdel)
time.go#L951: atomic.Xadd(&pp.numTimers, -cdel)
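
Several of the call sites above share one recognizable pattern: because Xadd returns the new value, atomic.Xadd(&next, +1) - 1 (mgcmark.go#L1043, mgcmark.go#L1158, sema.go#L482) hands each caller a unique, previously unclaimed index. The sketch below illustrates that pattern only, again using the exported sync/atomic package since the runtime-internal one is not importable; claimJob and the goroutine count are assumptions for illustration.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// claimJob sketches the work-claiming idiom from the call sites above:
// the atomic add returns the new counter value, so subtracting 1 yields
// an index that no other concurrent caller can also receive.
func claimJob(next *uint32) uint32 {
	return atomic.AddUint32(next, 1) - 1
}

func main() {
	var next uint32
	var wg sync.WaitGroup
	claimed := make([]uint32, 8)
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			claimed[i] = claimJob(&next) // each goroutine gets a distinct index in 0..7
		}(i)
	}
	wg.Wait()
	fmt.Println(claimed) // the values 0 through 7, in some order
}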