const internal/goarch.PtrSize
308 uses
internal/goarch (current package)
goarch.go#L33: const PtrSize = 4 << (^uintptr(0) >> 63)
goarch.go#L49: const Int64Align = PtrSize
goarch_amd64.go#L12: _StackAlign = PtrSize
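
The defining expression at goarch.go#L33 picks the word size at compile time: ^uintptr(0) is an all-ones word, and shifting it right by 63 yields 1 only when uintptr is 64 bits wide. A minimal standalone sketch of the same trick (not runtime code):

    package main

    import "fmt"

    func main() {
        // ^uintptr(0) is 2^64-1 on 64-bit targets and 2^32-1 on 32-bit
        // ones, so the shift below produces 1 or 0 respectively, making
        // the constant 4<<1 = 8 or 4<<0 = 4.
        const ptrSize = 4 << (^uintptr(0) >> 63)
        fmt.Println(ptrSize) // 8 on amd64/arm64, 4 on 386/arm
    }
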
internal/abi
abi.go#L75: if argSize > goarch.PtrSize || argSize == 0 || argSize&(argSize-1) != 0 {
abi.go#L80: offset = goarch.PtrSize - argSize
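
The guard at abi.go#L75 admits only sizes that are nonzero powers of two no larger than a pointer word; n&(n-1) clears the lowest set bit, so it is zero exactly for powers of two. A hedged sketch of the condition, inverted (validArgSize is an illustrative name; PtrSize fixed at 8 here):

    // validArgSize mirrors the abi.go#L75 check: nonzero, at most one
    // pointer word, and a power of two.
    func validArgSize(argSize uintptr) bool {
        const ptrSize = 8 // assumption: 64-bit target
        return argSize != 0 && argSize <= ptrSize && argSize&(argSize-1) == 0
    }
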
internal/reflectlite
swapper.go#L40: if size == goarch.PtrSize {
value.go#L103: if v.typ().Size() != goarch.PtrSize || !v.typ().Pointers() {
internal/runtime/maps
map.go#L244: if goarch.PtrSize == 4 {
map.go#L337: return *(**table)(unsafe.Pointer(uintptr(m.dirPtr) + goarch.PtrSize*i))
map.go#L341: *(**table)(unsafe.Pointer(uintptr(m.dirPtr) + goarch.PtrSize*i)) = nt
runtime_faststr_swiss.go#L52: return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
runtime_faststr_swiss.go#L66: return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
runtime_faststr_swiss.go#L146: slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
runtime_faststr_swiss.go#L205: slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
table.go#L1036: if goarch.PtrSize == 4 {
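
map.go#L337 and #L341 index a raw backing array of *table by hand: each slot is one pointer wide, so entry i sits goarch.PtrSize*i bytes past the base. A hedged sketch of the same arithmetic via unsafe.Add (dirEntry is an illustrative name; 64-bit target assumed):

    package main

    import "unsafe"

    const ptrSize = 8 // assumption: 64-bit target

    // dirEntry loads pointer entry i from the array starting at dirPtr.
    func dirEntry(dirPtr unsafe.Pointer, i uintptr) unsafe.Pointer {
        return *(*unsafe.Pointer)(unsafe.Add(dirPtr, ptrSize*i))
    }

    func main() {
        arr := [3]unsafe.Pointer{nil, unsafe.Pointer(new(int)), nil}
        _ = dirEntry(unsafe.Pointer(&arr[0]), 1)
    }
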
internal/runtime/math
math.go#L14: if a|b < 1<<(4*goarch.PtrSize) || a == 0 {
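
math.go#L14 is the fast path of an overflow-checked multiply: with 64-bit pointers, a|b < 1<<32 means both operands fit in 32 bits, so their product cannot overflow 64. A sketch of the whole pattern with PtrSize fixed at 8 (mulUintptr is illustrative, modeled on this file):

    const ptrSize = 8 // assumption: 64-bit target

    // mulUintptr returns a*b and whether it overflowed. The division in
    // the slow path only runs when both operands use the upper half of
    // the word; a == 0 is diverted first so maxUintptr/a is safe.
    func mulUintptr(a, b uintptr) (uintptr, bool) {
        if a|b < 1<<(4*ptrSize) || a == 0 {
            return a * b, false
        }
        const maxUintptr = ^uintptr(0)
        return a * b, b > maxUintptr/a
    }
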
internal/runtime/sys
consts.go#L25: const Int64Align = goarch.PtrSize
internal/sync
hashtriemap.go#L69: hashShift := 8 * goarch.PtrSize
hashtriemap.go#L98: hashShift = 8 * goarch.PtrSize
hashtriemap.go#L215: hashShift = 8 * goarch.PtrSize
hashtriemap.go#L341: if hashShift == 8*goarch.PtrSize {
hashtriemap.go#L403: if hashShift == 8*goarch.PtrSize {
hashtriemap.go#L430: hashShift = 8 * goarch.PtrSize
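
hashtriemap.go starts hashShift at 8*goarch.PtrSize, the bit width of a hash word, and consumes hash bits from the top as the trie descends; the #L341/#L403 comparisons appear to ask whether no bits have been consumed yet. A loosely hedged sketch of that consumption; the 16-way branching (nChildrenLog2, nChildren, childIndex) is an illustrative assumption, not the package's actual fan-out:

    const ptrSize = 8            // assumption: 64-bit target
    const hashBits = 8 * ptrSize // the initial hashShift
    const nChildrenLog2 = 4      // assumed fan-out of 16
    const nChildren = 1 << nChildrenLog2

    // childIndex peels the next nChildrenLog2 bits off the top of the
    // hash, returning the reduced shift and the child slot they select.
    func childIndex(hash uintptr, hashShift uint) (uint, uintptr) {
        hashShift -= nChildrenLog2
        return hashShift, (hash >> hashShift) & (nChildren - 1)
    }
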
reflect
abi.go#L170: ok = a.assignIntN(0, goarch.PtrSize, 1, 0b1)
abi.go#L179: ok = a.assignIntN(0, goarch.PtrSize, 1, 0b0)
abi.go#L183: a.stackAssign(goarch.PtrSize, goarch.PtrSize)
abi.go#L205: switch goarch.PtrSize {
abi.go#L218: return a.assignIntN(offset, goarch.PtrSize, 2, 0b01)
abi.go#L220: return a.assignIntN(offset, goarch.PtrSize, 2, 0b10)
abi.go#L222: return a.assignIntN(offset, goarch.PtrSize, 3, 0b001)
abi.go#L265: if ptrMap != 0 && size != goarch.PtrSize {
abi.go#L416: spill += goarch.PtrSize
abi.go#L433: spill = align(spill, goarch.PtrSize)
abi.go#L438: retOffset := align(in.stackBytes, goarch.PtrSize)
swapper.go#L41: if size == goarch.PtrSize {
type.go#L2045: ptrs := typ.PtrBytes / goarch.PtrSize
type.go#L2046: words := typ.Size_ / goarch.PtrSize
type.go#L2485: } else if typ.PtrBytes <= abi.MaxPtrmaskBytes*8*goarch.PtrSize {
type.go#L2646: case array.PtrBytes <= abi.MaxPtrmaskBytes*8*goarch.PtrSize:
type.go#L2648: n := (array.PtrBytes/goarch.PtrSize + 7) / 8
type.go#L2650: n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
type.go#L2763: Align_: goarch.PtrSize,
type.go#L2768: Size_: align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize),
type.go#L2769: PtrBytes: uintptr(abid.stackPtrs.n) * goarch.PtrSize,
type.go#L2804: if bv.n%(8*goarch.PtrSize) == 0 {
type.go#L2808: for i := 0; i < goarch.PtrSize; i++ {
type.go#L2824: for bv.n < uint32(offset/goarch.PtrSize) {
type.go#L2831: for bv.n < uint32(offset/goarch.PtrSize) {
value.go#L112: if v.typ().Size() != goarch.PtrSize || !v.typ().Pointers() {
value.go#L568: frameSize = align(frameSize, goarch.PtrSize)
value.go#L1092: methodFrameSize = align(methodFrameSize, goarch.PtrSize)
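
The align calls above (abi.go#L433/#L438, value.go#L568/#L1092) round byte counts up to pointer alignment; reflect's helper is the usual power-of-two round-up. A sketch, with an example: align(13, 8) == 16, so a 13-byte frame occupies two pointer words.

    // align rounds x up to a multiple of n, where n is a power of two.
    // Adding n-1 carries x past the next boundary and &^ (n-1) clears
    // the low bits to land exactly on it.
    func align(x, n uintptr) uintptr {
        return (x + n - 1) &^ (n - 1)
    }
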
runtime
alg.go#L16: c0 = uintptr((8-goarch.PtrSize)/4*2860486313 + (goarch.PtrSize-4)/4*33054211828000289)
alg.go#L17: c1 = uintptr((8-goarch.PtrSize)/4*3267000013 + (goarch.PtrSize-4)/4*23344194077549503)
alg.go#L438: const hashRandomBytes = goarch.PtrSize / 4 * 64
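
alg.go#L16-17 select hash constants without a branch: (8-PtrSize)/4 and (PtrSize-4)/4 are 1 and 0 on 32-bit targets and 0 and 1 on 64-bit ones, so exactly one prime survives. Because both arms are exact untyped constants, the oversized 64-bit prime never overflows a 32-bit uintptr once multiplied by zero. A standalone check of the arithmetic:

    package main

    import "fmt"

    func main() {
        for _, ptrSize := range []uint64{4, 8} {
            sel32 := (8 - ptrSize) / 4 // 1 when PtrSize == 4, else 0
            sel64 := (ptrSize - 4) / 4 // 1 when PtrSize == 8, else 0
            c0 := sel32*2860486313 + sel64*33054211828000289
            fmt.Printf("PtrSize=%d -> c0=%d\n", ptrSize, c0)
        }
    }
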
arena.go#L231: return userArenaChunkBytes/goarch.PtrSize/8 + unsafe.Sizeof(_type{})
arena.go#L558: nb := typ.PtrBytes / goarch.PtrSize
arena.go#L606: h.low = offset / goarch.PtrSize % ptrBits
arena.go#L609: h.offset = offset - h.low*goarch.PtrSize
arena.go#L635: idx := h.offset / (ptrBits * goarch.PtrSize)
arena.go#L645: h.offset += ptrBits * goarch.PtrSize
arena.go#L655: words := size / goarch.PtrSize
arena.go#L671: zeros := (offset+size-h.offset)/goarch.PtrSize - h.valid
arena.go#L685: idx := h.offset / (ptrBits * goarch.PtrSize)
arena.go#L698: h.offset += ptrBits * goarch.PtrSize
arena.go#L707: idx := h.offset / (ptrBits * goarch.PtrSize)
arena.go#L718: h.offset += ptrBits * goarch.PtrSize
arena.go#L726: if goarch.PtrSize == 8 {
cgocall.go#L648: p = *(*unsafe.Pointer)(add(p, goarch.PtrSize))
cgocheck.go#L154: skipMask := off / goarch.PtrSize / 8
cgocheck.go#L155: skipBytes := skipMask * goarch.PtrSize * 8
cgocheck.go#L161: for i := uintptr(0); i < size; i += goarch.PtrSize {
cgocheck.go#L162: if i&(goarch.PtrSize*8-1) == 0 {
cgocheck.go#L169: off -= goarch.PtrSize
heapdump.go#L249: dumpint(uint64(offset + i*goarch.PtrSize))
heapdump.go#L299: for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
heapdump.go#L308: for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
heapdump.go#L315: for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
heapdump.go#L322: dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
heapdump.go#L515: dumpint(goarch.PtrSize)
heapdump.go#L724: nptr := size / goarch.PtrSize
heapdump.go#L745: i := (addr - p) / goarch.PtrSize
iface.go#L77: m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.Methods)-1)*goarch.PtrSize, 0, &memstats.other_sys))
iface.go#L114: p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
iface.go#L147: t2 := (*itabTableType)(mallocgc((2+2*t.size)*goarch.PtrSize, nil, true))
iface.go#L175: p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
iface.go#L687: m := *(**itab)(add(unsafe.Pointer(&t.entries), i*goarch.PtrSize))
lock_spinbit.go#L75: return &(*[8]uint8)(unsafe.Pointer(p))[goarch.PtrSize/1-1]
malloc.go#L147: _NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
malloc.go#L248: heapArenaWords = heapArenaBytes / goarch.PtrSize
malloc.go#L256: heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize)
malloc.go#L452: if minSizeForMallocHeader/goarch.PtrSize > 8*goarch.PtrSize {
malloc.go#L476: } else if goarch.PtrSize == 8 {
malloc.go#L784: r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
malloc.go#L786: r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
malloc.go#L795: size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
malloc.go#L799: newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
malloc.go#L804: *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
malloc.go#L1148: } else if goarch.PtrSize == 4 && size == 12 {
malloc.go#L1372: if goarch.PtrSize == 8 && sizeclass == 1 {
malloc.go#L1968: persistent.off = alignUp(goarch.PtrSize, align)
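
malloc.go#L248 and #L256 size the heap arena bitmap at one bit per pointer-sized word, packed 8*PtrSize bits per bitmap word. Worked numbers, assuming a 64 MiB arena and 8-byte pointers (both vary by platform):

    const (
        heapArenaBytes       = 64 << 20                       // assumption
        ptrSize              = 8                              // assumption
        heapArenaWords       = heapArenaBytes / ptrSize       // 8,388,608 words
        heapArenaBitmapWords = heapArenaWords / (8 * ptrSize) // 131,072 bitmap words
    )
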
mbarrier.go#L250: if writeBarrier.enabled && typ != nil && typ.Pointers() && size >= goarch.PtrSize {
mbitmap.go#L101: minSizeForMallocHeader = goarch.PtrSize * ptrBits
mbitmap.go#L253: if goarch.PtrSize == 8 {
mbitmap.go#L261: return tp, tp.addr + uintptr(i)*goarch.PtrSize
mbitmap.go#L284: if tp.addr+goarch.PtrSize*ptrBits >= tp.elem+tp.typ.PtrBytes {
mbitmap.go#L288: tp.addr += ptrBits * goarch.PtrSize
mbitmap.go#L297: tp.mask = readUintptr(addb(getGCMask(tp.typ), (tp.addr-tp.elem)/goarch.PtrSize/8))
mbitmap.go#L298: if tp.addr+goarch.PtrSize*ptrBits > limit {
mbitmap.go#L299: bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
mbitmap.go#L321: tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
mbitmap.go#L323: if tp.addr+goarch.PtrSize*ptrBits > limit {
mbitmap.go#L324: bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
mbitmap.go#L337: tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize)
mbitmap.go#L339: tp.addr += alignDown(n, ptrBits*goarch.PtrSize)
mbitmap.go#L356: tp.mask = readUintptr(addb(getGCMask(tp.typ), (tp.addr-tp.elem)/goarch.PtrSize/8))
mbitmap.go#L357: tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
mbitmap.go#L359: if tp.addr+goarch.PtrSize*ptrBits > limit {
mbitmap.go#L360: bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
mbitmap.go#L418: if (dst|src|size)&(goarch.PtrSize-1) != 0 {
mbitmap.go#L504: if (dst|src|size)&(goarch.PtrSize-1) != 0 {
mbitmap.go#L538: if goarch.PtrSize == 8 && !s.spanclass.noscan() && s.spanclass.sizeclass() == 1 {
mbitmap.go#L588: bitmapSize := spanSize / goarch.PtrSize / 8
mbitmap.go#L589: elems := int(bitmapSize / goarch.PtrSize)
mbitmap.go#L603: bitmapSize := spanSize / goarch.PtrSize / 8
mbitmap.go#L614: i := (addr - span.base()) / goarch.PtrSize / ptrBits
mbitmap.go#L615: j := (addr - span.base()) / goarch.PtrSize % ptrBits
mbitmap.go#L616: bits := span.elemsize / goarch.PtrSize
mbitmap.go#L617: word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
mbitmap.go#L618: word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))
mbitmap.go#L648: if typ.Size_ == goarch.PtrSize {
mbitmap.go#L649: src = (1 << (dataSize / goarch.PtrSize)) - 1
mbitmap.go#L658: src |= src0 << (i / goarch.PtrSize)
mbitmap.go#L664: src &= (1 << (dataSize / goarch.PtrSize)) - 1
mbitmap.go#L670: dst := unsafe.Pointer(span.base() + pageSize - pageSize/goarch.PtrSize/8)
mbitmap.go#L671: o := (x - span.base()) / goarch.PtrSize
mbitmap.go#L674: bits := span.elemsize / goarch.PtrSize
mbitmap.go#L679: dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
mbitmap.go#L680: dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
mbitmap.go#L685: dst := (*uintptr)(add(dst, i*goarch.PtrSize))
mbitmap.go#L760: off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize)
mbitmap.go#L763: off -= goarch.PtrSize
mbitmap.go#L764: size += goarch.PtrSize
mbitmap.go#L767: size -= alignDown(uintptr(cheaprand())%size, goarch.PtrSize)
mbitmap.go#L769: size = goarch.PtrSize
mbitmap.go#L787: for i := uintptr(0); i < maxIterBytes; i += goarch.PtrSize {
mbitmap.go#L793: j := off / goarch.PtrSize
mbitmap.go#L844: for i := off; i < off+size; i += goarch.PtrSize {
mbitmap.go#L850: j := off / goarch.PtrSize
mbitmap.go#L892: for i := off; i < off+size; i += goarch.PtrSize {
mbitmap.go#L898: j := off / goarch.PtrSize
mbitmap.go#L1316: const ptrBits = 8 * goarch.PtrSize
mbitmap.go#L1327: word := maskOffset / goarch.PtrSize
mbitmap.go#L1332: for i := uintptr(0); i < size; i += goarch.PtrSize {
mbitmap.go#L1337: i += 7 * goarch.PtrSize
mbitmap.go#L1386: for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
mbitmap.go#L1387: if i&(goarch.PtrSize*8-1) == 0 {
mbitmap.go#L1428: if goarch.PtrSize == 8 {
mbitmap.go#L1445: n := (size/goarch.PtrSize + 7) / 8
mbitmap.go#L1549: const maxBits = goarch.PtrSize*8 - 7
mbitmap.go#L1592: for nb <= goarch.PtrSize*8 {
mbitmap.go#L1742: mask = make([]byte, n/goarch.PtrSize)
mbitmap.go#L1743: for i := uintptr(0); i < n; i += goarch.PtrSize {
mbitmap.go#L1744: off := (uintptr(p) + i - datap.data) / goarch.PtrSize
mbitmap.go#L1745: mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
mbitmap.go#L1754: mask = make([]byte, n/goarch.PtrSize)
mbitmap.go#L1755: for i := uintptr(0); i < n; i += goarch.PtrSize {
mbitmap.go#L1756: off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
mbitmap.go#L1757: mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
mbitmap.go#L1777: maskFromHeap := make([]byte, (limit-base)/goarch.PtrSize)
mbitmap.go#L1783: maskFromHeap[(addr-base)/goarch.PtrSize] = 1
mbitmap.go#L1802: maskFromType := make([]byte, (limit-base)/goarch.PtrSize)
mbitmap.go#L1809: maskFromType[(addr-base)/goarch.PtrSize] = 1
mbitmap.go#L1870: size := uintptr(locals.n) * goarch.PtrSize
mbitmap.go#L1872: mask = make([]byte, n/goarch.PtrSize)
mbitmap.go#L1873: for i := uintptr(0); i < n; i += goarch.PtrSize {
mbitmap.go#L1874: off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
mbitmap.go#L1875: mask[i/goarch.PtrSize] = locals.ptrbit(off)
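
Several of the mbitmap.go lines above (#L1744-45, #L1756-57, #L1875) read a one-bit-per-word pointer bitmap: off is an index in pointer-sized words, and each bitmap byte covers eight of them. A minimal sketch of the lookup:

    // ptrBit reports whether word off is marked as a pointer slot:
    // bit off%8 within byte off/8 of the mask.
    func ptrBit(bitmap []byte, off uintptr) byte {
        return (bitmap[off/8] >> (off % 8)) & 1
    }
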
mcheckmark.go#L28: b [heapArenaBytes / goarch.PtrSize / 8]uint8
mfinal.go#L29: fin [(_FinBlockSize - 2*goarch.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
mfinal.go#L49: finptrmask [_FinBlockSize / goarch.PtrSize / 8]byte
mfinal.go#L117: if (unsafe.Sizeof(finalizer{}) != 5*goarch.PtrSize ||
mfinal.go#L119: unsafe.Offsetof(finalizer{}.arg) != goarch.PtrSize ||
mfinal.go#L120: unsafe.Offsetof(finalizer{}.nret) != 2*goarch.PtrSize ||
mfinal.go#L121: unsafe.Offsetof(finalizer{}.fint) != 3*goarch.PtrSize ||
mfinal.go#L122: unsafe.Offsetof(finalizer{}.ot) != 4*goarch.PtrSize) {
mfinal.go#L528: nret = alignUp(nret, goarch.PtrSize)
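
mfinal.go#L117-122 is a layout assertion: it checks that finalizer's five pointer-sized fields sit at the offsets the finptrmask GC bitmap and assembly expect. A simplified, runnable sketch of the pattern; the field types here are reduced to unsafe.Pointer/uintptr, where the runtime uses concrete types of the same sizes:

    package main

    import "unsafe"

    type finalizer struct {
        fn   unsafe.Pointer
        arg  unsafe.Pointer
        nret uintptr
        fint unsafe.Pointer
        ot   unsafe.Pointer
    }

    func main() {
        const ptrSize = unsafe.Sizeof(uintptr(0))
        if unsafe.Sizeof(finalizer{}) != 5*ptrSize ||
            unsafe.Offsetof(finalizer{}.arg) != ptrSize ||
            unsafe.Offsetof(finalizer{}.nret) != 2*ptrSize {
            panic("finalizer layout drifted")
        }
    }
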
mgcmark.go#L265: if rootBlockBytes%(8*goarch.PtrSize) != 0 {
mgcmark.go#L278: ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize))))
mgcmark.go#L401: scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
mgcmark.go#L405: scanblock(uintptr(unsafe.Pointer(&spw.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
mgcmark.go#L409: scanblock(uintptr(unsafe.Pointer(&spc.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
mgcmark.go#L899: scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
mgcmark.go#L916: scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
mgcmark.go#L921: scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
mgcmark.go#L927: scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
mgcmark.go#L1054: size := uintptr(locals.n) * goarch.PtrSize
mgcmark.go#L1060: scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
mgcmark.go#L1352: bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
mgcmark.go#L1354: i += goarch.PtrSize * 8
mgcmark.go#L1370: i += goarch.PtrSize
mgcmark.go#L1444: scanSize = addr - b + goarch.PtrSize
mgcmark.go#L1485: word := (p - b) / goarch.PtrSize
mgcmark.go#L1510: for i := uintptr(0); i < n; i += goarch.PtrSize {
mgcmark.go#L1512: word := i / goarch.PtrSize
mgcmark.go#L1521: if i%(goarch.PtrSize*8) != 0 {
mgcmark.go#L1524: i += goarch.PtrSize*8 - goarch.PtrSize
mgcmark.go#L1587: if obj&(goarch.PtrSize-1) != 0 {
mgcmark.go#L1659: size = off + goarch.PtrSize
mgcmark.go#L1661: for i := uintptr(0); i < size; i += goarch.PtrSize {
mgcmark.go#L1665: if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
mgcstack.go#L110: obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
mgcwork.go#L328: obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
mheap.go#L531: n := 64 * 1024 / goarch.PtrSize
mheap.go#L537: sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys)
mheap.go#L1414: s.nelems = uint16((nbytes - (nbytes / goarch.PtrSize / 8)) / s.elemsize)
mheap.go#L2003: scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
mheap.go#L2060: scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
mheap.go#L2229: scanblock(uintptr(unsafe.Pointer(&s.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
mprof.go#L592: pc := *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
mranges.go#L258: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, sysStat))
mranges.go#L385: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, a.sysStat))
mranges.go#L455: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, b.sysStat))
mspanset.go#L104: newSpine := persistentalloc(newCap*goarch.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
mspanset.go#L108: memmove(newSpine, spine.p, b.spineCap*goarch.PtrSize)
mspanset.go#L299: return (*atomic.Pointer[spanSetBlock])(add(s.p, goarch.PtrSize*idx))
mwbbuf.go#L132: if b.next+goarch.PtrSize > b.end {
mwbbuf.go#L136: b.next += goarch.PtrSize
mwbbuf.go#L143: if b.next+2*goarch.PtrSize > b.end {
mwbbuf.go#L147: b.next += 2 * goarch.PtrSize
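
mwbbuf.go#L132-147 is bump-pointer reservation: the write barrier buffer stores raw pointer slots, so advancing next by goarch.PtrSize claims one slot and by 2*goarch.PtrSize claims a pair. A hedged sketch (tryReserve is an illustrative name, not the runtime's API):

    const ptrSize = 8 // assumption: 64-bit target

    type wbBuf struct {
        next, end uintptr // fill position and one past the last byte
    }

    // tryReserve claims room for n pointer slots, or reports that the
    // buffer is full and must be flushed before retrying.
    func (b *wbBuf) tryReserve(n uintptr) (uintptr, bool) {
        if b.next+n*ptrSize > b.end {
            return 0, false
        }
        p := b.next
        b.next += n * ptrSize
        return p, true
    }
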
os_linux.go#L251: auxvp := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
panic.go#L905: return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true
panic.go#L1247: gp.sched.bp = fp - 2*goarch.PtrSize
panic.go#L1252: gp.sched.bp = sp - goarch.PtrSize
preempt.go#L323: asyncPreemptStack = uintptr(total) + 8*goarch.PtrSize
print.go#L273: for i := uintptr(0); p+i < end; i += goarch.PtrSize {
proc.go#L157: if goarch.PtrSize == 8 {
proc.go#L698: return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
proc.go#L2435: gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame
proc.go#L5077: totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
proc.go#L5087: *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
proc.go#L7369: p := add(firstFunc, uintptr(i)*goarch.PtrSize)
runtime1.go#L63: return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
runtime1.go#L198: if unsafe.Sizeof(k) != goarch.PtrSize {
runtime1.go#L201: if unsafe.Sizeof(l) != goarch.PtrSize {
runtime2.go#L518: tlsSize = tlsSlots * goarch.PtrSize
runtime2.go#L629: _ [goexperiment.SpinbitMutexInt * 700 * (2 - goarch.PtrSize/4)]byte
signal_amd64.go#L83: sp -= goarch.PtrSize
signal_linux_amd64.go#L55: *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
slice.go#L216: case et.Size_ == goarch.PtrSize:
slice.go#L217: lenmem = uintptr(oldLen) * goarch.PtrSize
slice.go#L218: newlenmem = uintptr(newLen) * goarch.PtrSize
slice.go#L219: capmem = roundupsize(uintptr(newcap)*goarch.PtrSize, noscan)
slice.go#L220: overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
slice.go#L221: newcap = int(capmem / goarch.PtrSize)
slice.go#L224: if goarch.PtrSize == 8 {
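
slice.go#L216-221 is growslice's case for pointer-sized elements; note how rounding the allocation up to a size class on #L219 feeds back into extra capacity on #L221. A runnable sketch with a stand-in for roundupsize:

    package main

    import "fmt"

    // roundupsize stands in for the runtime's size-class rounding; here
    // it just rounds up to a multiple of 16 for illustration.
    func roundupsize(size uintptr, noscan bool) uintptr {
        return (size + 15) &^ 15
    }

    func main() {
        const ptrSize = 8 // assumption: 64-bit target
        oldLen, newLen, newcap := 3, 4, 5
        lenmem := uintptr(oldLen) * ptrSize
        newlenmem := uintptr(newLen) * ptrSize
        capmem := roundupsize(uintptr(newcap)*ptrSize, false)
        newcap = int(capmem / ptrSize) // rounding becomes extra capacity
        fmt.Println(lenmem, newlenmem, capmem, newcap) // 24 32 48 6
    }
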
stack.go#L122: uintptrMask = 1<<(8*goarch.PtrSize) - 1
stack.go#L625: print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
stack.go#L632: pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
stack.go#L671: if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize {
stack.go#L696: size := uintptr(locals.n) * goarch.PtrSize
stack.go#L726: for i := uintptr(0); i < ptrBytes; i += goarch.PtrSize {
stack.go#L727: if *addb(gcData, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
stack.go#L754: if oldfp == gp.sched.sp-goarch.PtrSize {
stack.go#L755: memmove(unsafe.Pointer(gp.sched.bp), unsafe.Pointer(oldfp), goarch.PtrSize)
stack.go#L1041: sp -= goarch.PtrSize
stkframe.go#L79: return uintptr(argMap.n) * goarch.PtrSize
stkframe.go#L97: argMap.n = f.args / goarch.PtrSize
stkframe.go#L112: minSP -= goarch.PtrSize
stkframe.go#L137: retValid := *(*bool)(unsafe.Pointer(arg0 + 4*goarch.PtrSize))
stkframe.go#L146: n := int32((mv.argLen &^ (goarch.PtrSize - 1)) / goarch.PtrSize)
stkframe.go#L221: print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(args.n*goarch.PtrSize), "\n")
stkframe.go#L248: p = add(p, goarch.PtrSize)
symtab.go#L619: hdr.minLC != sys.PCQuantum || hdr.ptrSize != goarch.PtrSize || hdr.textStart != datap.text {
symtab.go#L975: return (targetpc / goarch.PtrSize) % uintptr(len(pcvalueCache{}.entries))
symtab.go#L1175: if debugPcln && x&(goarch.PtrSize-1) != 0 {
sys_x86.go#L18: sp -= goarch.PtrSize
traceback.go#L182: frame.sp += goarch.PtrSize
traceback.go#L329: frame.fp += goarch.PtrSize
traceback.go#L377: lrPtr = frame.fp - goarch.PtrSize
traceback.go#L386: frame.varp -= goarch.PtrSize
traceback.go#L407: frame.varp -= goarch.PtrSize
traceback.go#L1288: const expand = 32 * goarch.PtrSize
traceback.go#L1289: const maxExpand = 256 * goarch.PtrSize
tracemap.go#L110: m = &n.children[hashIter>>(8*goarch.PtrSize-2)]
tracestack.go#L258: pcBuf[i] = *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
tracetype.go#L30: id, _ := t.tab.put(noescape(unsafe.Pointer(&typ)), goarch.PtrSize)
type.go#L129: bytes := goarch.PtrSize * divRoundUp(t.PtrBytes/goarch.PtrSize, 8*goarch.PtrSize)
type.go#L130: p = (*byte)(persistentalloc(bytes, goarch.PtrSize, &memstats.other_sys))
type.go#L209: dst.write(t.GCData, t.PtrBytes/goarch.PtrSize)
type.go#L226: dst = dst.offset(e.Size_ / goarch.PtrSize)
type.go#L243: buildGCMask(ft, dst.offset(f.Offset/goarch.PtrSize))
type.go#L248: dst = dst.offset(bigField.Offset / goarch.PtrSize)
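
type.go#L129 sizes a GC mask at one bit per pointer word, rounded up to whole pointer-sized mask words; divRoundUp is the runtime's ceil-division helper, (n + a - 1) / a. A worked example, assuming 8-byte pointers:

    package main

    import "fmt"

    func divRoundUp(n, a uintptr) uintptr { return (n + a - 1) / a }

    func main() {
        const ptrSize = 8        // assumption: 64-bit target
        ptrBytes := uintptr(200) // pointer data spanning 25 words
        bytes := ptrSize * divRoundUp(ptrBytes/ptrSize, 8*ptrSize)
        fmt.Println(bytes) // 8: 25 mask bits fit in one 64-bit word
    }
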