const internal/goarch.PtrSize
326 uses
internal/goarch (current package)
goarch.go#L35: const PtrSize = 4 << (^uintptr(0) >> 63)
goarch.go#L51: const Int64Align = PtrSize
goarch_amd64.go#L12: _StackAlign = PtrSize
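The defining expression at goarch.go#L35 derives the constant from the width of uintptr: ^uintptr(0) is all one bits, so shifting it right by 63 yields 1 on 64-bit targets and 0 on 32-bit ones, giving PtrSize = 8 or 4 respectively. A minimal standalone sketch of the same computation (the names here are illustrative, not from the package):

	package main

	import "fmt"

	func main() {
		// ^uintptr(0) is all one bits; >>63 leaves 1 on 64-bit targets, 0 on 32-bit ones.
		topBit := ^uintptr(0) >> 63
		ptrSize := 4 << topBit // 4<<1 == 8 on amd64/arm64, 4<<0 == 4 on 386/arm
		fmt.Println(ptrSize)
	}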
internal/abi
abi.go#L75: if argSize > goarch.PtrSize || argSize == 0 || argSize&(argSize-1) != 0 {
abi.go#L80: offset = goarch.PtrSize - argSize
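At abi.go#L75, argSize&(argSize-1) != 0 is the usual power-of-two test: it is non-zero exactly when argSize is not a power of two, so register-assigned integer arguments are limited to sizes 1, 2, 4, ... up to goarch.PtrSize. The offset at abi.go#L80 then positions a sub-word argument within its pointer-sized register slot (a reading based only on these two excerpts, not on the surrounding code).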
internal/reflectlite
swapper.go#L40: if size == goarch.PtrSize {
value.go#L105: if v.typ().Size() != goarch.PtrSize || !v.typ().Pointers() {
internal/runtime/gc
malloc.go#L10: ptrBits = 8 * goarch.PtrSize
malloc.go#L46: MinSizeForMallocHeader = goarch.PtrSize * ptrBits
scan.go#L12: type ObjMask [MaxObjsPerSpan / (goarch.PtrSize * 8)]uintptr
scan.go#L15: type PtrMask [PageSize / goarch.PtrSize / (goarch.PtrSize * 8)]uintptr
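With the definition above, malloc.go#L10 and #L46 work out to ptrBits = 64 and MinSizeForMallocHeader = 8*64 = 512 bytes on 64-bit targets, and to ptrBits = 32 and 4*32 = 128 bytes on 32-bit targets.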
internal/runtime/maps
map.go#L249: if goarch.PtrSize == 4 {
map.go#L342: return *(**table)(unsafe.Pointer(uintptr(m.dirPtr) + goarch.PtrSize*i))
map.go#L346: *(**table)(unsafe.Pointer(uintptr(m.dirPtr) + goarch.PtrSize*i)) = nt
runtime_faststr_swiss.go#L52: return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
runtime_faststr_swiss.go#L66: return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
runtime_faststr_swiss.go#L146: slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
runtime_faststr_swiss.go#L205: slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
table.go#L1145: if goarch.PtrSize == 4 {
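In the faststr variants, the key slot holds a string header, which occupies two pointer-sized words (data pointer and length), so the element slot begins at slotKey + 2*goarch.PtrSize.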
internal/runtime/math
math.go#L20: if a|b < 1<<(4*goarch.PtrSize) || a == 0 {
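The guard at math.go#L20 relies on 4*goarch.PtrSize being half the word width: if a|b < 1<<(4*goarch.PtrSize), both operands fit in a half word, so their product cannot overflow a uintptr and no further overflow check is needed.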
internal/runtime/sys
consts.go#L25: const Int64Align = goarch.PtrSize
internal/sync
hashtriemap.go#L69: hashShift := 8 * goarch.PtrSize
hashtriemap.go#L98: hashShift = 8 * goarch.PtrSize
hashtriemap.go#L215: hashShift = 8 * goarch.PtrSize
hashtriemap.go#L341: if hashShift == 8*goarch.PtrSize {
hashtriemap.go#L403: if hashShift == 8*goarch.PtrSize {
hashtriemap.go#L430: hashShift = 8 * goarch.PtrSize
hash/maphash
maphash_runtime.go#L33: if goarch.PtrSize == 8 {
maphash_runtime.go#L60: if goarch.PtrSize == 8 {
reflect
abi.go#L170: ok = a.assignIntN(0, goarch.PtrSize, 1, 0b1)
abi.go#L179: ok = a.assignIntN(0, goarch.PtrSize, 1, 0b0)
abi.go#L183: a.stackAssign(goarch.PtrSize, goarch.PtrSize)
abi.go#L205: switch goarch.PtrSize {
abi.go#L218: return a.assignIntN(offset, goarch.PtrSize, 2, 0b01)
abi.go#L220: return a.assignIntN(offset, goarch.PtrSize, 2, 0b10)
abi.go#L222: return a.assignIntN(offset, goarch.PtrSize, 3, 0b001)
abi.go#L265: if ptrMap != 0 && size != goarch.PtrSize {
abi.go#L416: spill += goarch.PtrSize
abi.go#L433: spill = align(spill, goarch.PtrSize)
abi.go#L438: retOffset := align(in.stackBytes, goarch.PtrSize)
swapper.go#L41: if size == goarch.PtrSize {
type.go#L2061: ptrs := typ.PtrBytes / goarch.PtrSize
type.go#L2062: words := typ.Size_ / goarch.PtrSize
type.go#L2501: } else if typ.PtrBytes <= abi.MaxPtrmaskBytes*8*goarch.PtrSize {
type.go#L2662: case array.PtrBytes <= abi.MaxPtrmaskBytes*8*goarch.PtrSize:
type.go#L2664: n := (array.PtrBytes/goarch.PtrSize + 7) / 8
type.go#L2666: n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
type.go#L2779: Align_: goarch.PtrSize,
type.go#L2784: Size_: align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize),
type.go#L2785: PtrBytes: uintptr(abid.stackPtrs.n) * goarch.PtrSize,
type.go#L2820: if bv.n%(8*goarch.PtrSize) == 0 {
type.go#L2824: for i := 0; i < goarch.PtrSize; i++ {
type.go#L2840: for bv.n < uint32(offset/goarch.PtrSize) {
type.go#L2847: for bv.n < uint32(offset/goarch.PtrSize) {
value.go#L112: if v.typ().Size() != goarch.PtrSize || !v.typ().Pointers() {
value.go#L565: frameSize = align(frameSize, goarch.PtrSize)
value.go#L1089: methodFrameSize = align(methodFrameSize, goarch.PtrSize)
runtime
alg.go#L17: c0 = uintptr((8-goarch.PtrSize)/4*2860486313 + (goarch.PtrSize-4)/4*33054211828000289)
alg.go#L18: c1 = uintptr((8-goarch.PtrSize)/4*3267000013 + (goarch.PtrSize-4)/4*23344194077549503)
alg.go#L371: const hashRandomBytes = goarch.PtrSize / 4 * 64
arena.go#L231: return userArenaChunkBytes/goarch.PtrSize/8 + unsafe.Sizeof(_type{})
arena.go#L558: nb := typ.PtrBytes / goarch.PtrSize
arena.go#L606: h.low = offset / goarch.PtrSize % ptrBits
arena.go#L609: h.offset = offset - h.low*goarch.PtrSize
arena.go#L635: idx := h.offset / (ptrBits * goarch.PtrSize)
arena.go#L645: h.offset += ptrBits * goarch.PtrSize
arena.go#L655: words := size / goarch.PtrSize
arena.go#L671: zeros := (offset+size-h.offset)/goarch.PtrSize - h.valid
arena.go#L685: idx := h.offset / (ptrBits * goarch.PtrSize)
arena.go#L698: h.offset += ptrBits * goarch.PtrSize
arena.go#L707: idx := h.offset / (ptrBits * goarch.PtrSize)
arena.go#L718: h.offset += ptrBits * goarch.PtrSize
arena.go#L726: if goarch.PtrSize == 8 {
cgocall.go#L648: p = *(*unsafe.Pointer)(add(p, goarch.PtrSize))
cgocheck.go#L154: skipMask := off / goarch.PtrSize / 8
cgocheck.go#L155: skipBytes := skipMask * goarch.PtrSize * 8
cgocheck.go#L161: for i := uintptr(0); i < size; i += goarch.PtrSize {
cgocheck.go#L162: if i&(goarch.PtrSize*8-1) == 0 {
cgocheck.go#L169: off -= goarch.PtrSize
heapdump.go#L250: dumpint(uint64(offset + i*goarch.PtrSize))
heapdump.go#L300: for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
heapdump.go#L309: for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
heapdump.go#L316: for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
heapdump.go#L323: dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
heapdump.go#L516: dumpint(goarch.PtrSize)
heapdump.go#L725: nptr := size / goarch.PtrSize
heapdump.go#L746: i := (addr - p) / goarch.PtrSize
iface.go#L77: m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.Methods)-1)*goarch.PtrSize, 0, &memstats.other_sys))
iface.go#L114: p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
iface.go#L147: t2 := (*itabTableType)(mallocgc((2+2*t.size)*goarch.PtrSize, nil, true))
iface.go#L175: p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
iface.go#L687: m := *(**itab)(add(unsafe.Pointer(&t.entries), i*goarch.PtrSize))
lock_spinbit.go#L76: return &(*[8]uint8)(unsafe.Pointer(p))[goarch.PtrSize/1-1]
malloc.go#L149: _NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
malloc.go#L250: heapArenaWords = heapArenaBytes / goarch.PtrSize
malloc.go#L258: heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize)
malloc.go#L454: if gc.MinSizeForMallocHeader/goarch.PtrSize > 8*goarch.PtrSize {
malloc.go#L478: } else if goarch.PtrSize == 8 {
malloc.go#L790: r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys, "heap metadata"))
malloc.go#L792: r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
malloc.go#L800: size := 2 * uintptr(cap((*arenaList))) * goarch.PtrSize
malloc.go#L804: newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
malloc.go#L809: *(*notInHeapSlice)(unsafe.Pointer(&(*arenaList))) = notInHeapSlice{newArray, len((*arenaList)), int(size / goarch.PtrSize)}
malloc.go#L1160: } else if goarch.PtrSize == 4 && size == 12 {
malloc.go#L1384: if goarch.PtrSize == 8 && sizeclass == 1 {
malloc.go#L1980: persistent.off = alignUp(goarch.PtrSize, align)
mbarrier.go#L250: if writeBarrier.enabled && typ != nil && typ.Pointers() && size >= goarch.PtrSize {
mbitmap.go#L219: if goarch.PtrSize == 8 {
mbitmap.go#L232: return tp, tp.addr + uintptr(i)*goarch.PtrSize
mbitmap.go#L255: if tp.addr+goarch.PtrSize*ptrBits >= tp.elem+tp.typ.PtrBytes {
mbitmap.go#L259: tp.addr += ptrBits * goarch.PtrSize
mbitmap.go#L268: tp.mask = readUintptr(addb(getGCMask(tp.typ), (tp.addr-tp.elem)/goarch.PtrSize/8))
mbitmap.go#L269: if tp.addr+goarch.PtrSize*ptrBits > limit {
mbitmap.go#L270: bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
mbitmap.go#L292: tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
mbitmap.go#L294: if tp.addr+goarch.PtrSize*ptrBits > limit {
mbitmap.go#L295: bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
mbitmap.go#L308: tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize)
mbitmap.go#L310: tp.addr += alignDown(n, ptrBits*goarch.PtrSize)
mbitmap.go#L327: tp.mask = readUintptr(addb(getGCMask(tp.typ), (tp.addr-tp.elem)/goarch.PtrSize/8))
mbitmap.go#L328: tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
mbitmap.go#L330: if tp.addr+goarch.PtrSize*ptrBits > limit {
mbitmap.go#L331: bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
mbitmap.go#L389: if (dst|src|size)&(goarch.PtrSize-1) != 0 {
mbitmap.go#L475: if (dst|src|size)&(goarch.PtrSize-1) != 0 {
mbitmap.go#L509: if goarch.PtrSize == 8 && !s.spanclass.noscan() && s.spanclass.sizeclass() == 1 {
mbitmap.go#L563: elems := int(bitmapSize / goarch.PtrSize)
mbitmap.go#L571: size = spanSize / goarch.PtrSize / 8
mbitmap.go#L597: i := (addr - span.base()) / goarch.PtrSize / ptrBits
mbitmap.go#L598: j := (addr - span.base()) / goarch.PtrSize % ptrBits
mbitmap.go#L599: bits := span.elemsize / goarch.PtrSize
mbitmap.go#L600: word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
mbitmap.go#L601: word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))
mbitmap.go#L631: if typ.Size_ == goarch.PtrSize {
mbitmap.go#L632: src = (1 << (dataSize / goarch.PtrSize)) - 1
mbitmap.go#L641: src |= src0 << (i / goarch.PtrSize)
mbitmap.go#L647: src &= (1 << (dataSize / goarch.PtrSize)) - 1
mbitmap.go#L655: o := (x - span.base()) / goarch.PtrSize
mbitmap.go#L658: bits := span.elemsize / goarch.PtrSize
mbitmap.go#L663: dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
mbitmap.go#L664: dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
mbitmap.go#L669: dst := (*uintptr)(add(dst, i*goarch.PtrSize))
mbitmap.go#L792: off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize)
mbitmap.go#L795: off -= goarch.PtrSize
mbitmap.go#L796: size += goarch.PtrSize
mbitmap.go#L799: size -= alignDown(uintptr(cheaprand())%size, goarch.PtrSize)
mbitmap.go#L801: size = goarch.PtrSize
mbitmap.go#L819: for i := uintptr(0); i < maxIterBytes; i += goarch.PtrSize {
mbitmap.go#L825: j := off / goarch.PtrSize
mbitmap.go#L876: for i := off; i < off+size; i += goarch.PtrSize {
mbitmap.go#L882: j := off / goarch.PtrSize
mbitmap.go#L924: for i := off; i < off+size; i += goarch.PtrSize {
mbitmap.go#L930: j := off / goarch.PtrSize
mbitmap.go#L1364: const ptrBits = 8 * goarch.PtrSize
mbitmap.go#L1375: word := maskOffset / goarch.PtrSize
mbitmap.go#L1380: for i := uintptr(0); i < size; i += goarch.PtrSize {
mbitmap.go#L1385: i += 7 * goarch.PtrSize
mbitmap.go#L1434: for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
mbitmap.go#L1435: if i&(goarch.PtrSize*8-1) == 0 {
mbitmap.go#L1476: if goarch.PtrSize == 8 {
mbitmap.go#L1493: n := (size/goarch.PtrSize + 7) / 8
mbitmap.go#L1597: const maxBits = goarch.PtrSize*8 - 7
mbitmap.go#L1640: for nb <= goarch.PtrSize*8 {
mbitmap.go#L1790: mask = make([]byte, n/goarch.PtrSize)
mbitmap.go#L1791: for i := uintptr(0); i < n; i += goarch.PtrSize {
mbitmap.go#L1792: off := (uintptr(p) + i - datap.data) / goarch.PtrSize
mbitmap.go#L1793: mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
mbitmap.go#L1802: mask = make([]byte, n/goarch.PtrSize)
mbitmap.go#L1803: for i := uintptr(0); i < n; i += goarch.PtrSize {
mbitmap.go#L1804: off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
mbitmap.go#L1805: mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
mbitmap.go#L1825: maskFromHeap := make([]byte, (limit-base)/goarch.PtrSize)
mbitmap.go#L1831: maskFromHeap[(addr-base)/goarch.PtrSize] = 1
mbitmap.go#L1850: maskFromType := make([]byte, (limit-base)/goarch.PtrSize)
mbitmap.go#L1857: maskFromType[(addr-base)/goarch.PtrSize] = 1
mbitmap.go#L1918: size := uintptr(locals.n) * goarch.PtrSize
mbitmap.go#L1920: mask = make([]byte, n/goarch.PtrSize)
mbitmap.go#L1921: for i := uintptr(0); i < n; i += goarch.PtrSize {
mbitmap.go#L1922: off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
mbitmap.go#L1923: mask[i/goarch.PtrSize] = locals.ptrbit(off)
mcheckmark.go#L28: b [heapArenaBytes / goarch.PtrSize / 8]uint8
mcheckmark.go#L114: wordIdx := (obj - alignDown(obj, heapArenaBytes)) / goarch.PtrSize
mcleanup.go#L219: cleanups [(cleanupBlockSize - unsafe.Sizeof(cleanupBlockHeader{})) / goarch.PtrSize]*funcval
mcleanup.go#L222: var cleanupBlockPtrMask [cleanupBlockSize / goarch.PtrSize / 8]byte
mfinal.go#L32: fin [(finBlockSize - 2*goarch.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
mfinal.go#L51: finptrmask [finBlockSize / goarch.PtrSize / 8]byte
mfinal.go#L122: if (unsafe.Sizeof(finalizer{}) != 5*goarch.PtrSize ||
mfinal.go#L124: unsafe.Offsetof(finalizer{}.arg) != goarch.PtrSize ||
mfinal.go#L125: unsafe.Offsetof(finalizer{}.nret) != 2*goarch.PtrSize ||
mfinal.go#L126: unsafe.Offsetof(finalizer{}.fint) != 3*goarch.PtrSize ||
mfinal.go#L127: unsafe.Offsetof(finalizer{}.ot) != 4*goarch.PtrSize) {
mfinal.go#L531: nret = alignUp(nret, goarch.PtrSize)
mgc.go#L722: p.gcw.ptrBuf = (*[gc.PageSize / goarch.PtrSize]uintptr)(persistentalloc(gc.PageSize, goarch.PtrSize, &memstats.gcMiscSys))
mgcmark.go#L196: scanblock(uintptr(unsafe.Pointer(&cb.cleanups[0])), n*goarch.PtrSize, &cleanupBlockPtrMask[0], gcw, nil)
mgcmark.go#L273: if rootBlockBytes%(8*goarch.PtrSize) != 0 {
mgcmark.go#L286: ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize))))
mgcmark.go#L403: scanblock(uintptr(unsafe.Pointer(&spw.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
mgcmark.go#L428: scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
mgcmark.go#L434: scanblock(uintptr(unsafe.Pointer(&spc.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
mgcmark.go#L919: scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
mgcmark.go#L936: scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
mgcmark.go#L941: scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
mgcmark.go#L947: scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
mgcmark.go#L1074: size := uintptr(locals.n) * goarch.PtrSize
mgcmark.go#L1080: scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
mgcmark.go#L1411: bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
mgcmark.go#L1413: i += goarch.PtrSize * 8
mgcmark.go#L1433: i += goarch.PtrSize
mgcmark.go#L1507: scanSize = addr - b + goarch.PtrSize
mgcmark.go#L1553: word := (p - b) / goarch.PtrSize
mgcmark.go#L1578: for i := uintptr(0); i < n; i += goarch.PtrSize {
mgcmark.go#L1580: word := i / goarch.PtrSize
mgcmark.go#L1589: if i%(goarch.PtrSize*8) != 0 {
mgcmark.go#L1592: i += goarch.PtrSize*8 - goarch.PtrSize
mgcmark.go#L1662: if obj&(goarch.PtrSize-1) != 0 {
mgcmark.go#L1737: size = off + goarch.PtrSize
mgcmark.go#L1739: for i := uintptr(0); i < size; i += goarch.PtrSize {
mgcmark.go#L1743: if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
mgcstack.go#L110: obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
mgcwork.go#L109: ptrBuf *[pageSize / goarch.PtrSize]uintptr
mgcwork.go#L386: obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
mheap.go#L555: n := 64 * 1024 / goarch.PtrSize
mheap.go#L561: sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys, "allspans array")
mheap.go#L1469: reserve += nbytes / goarch.PtrSize / 8
mheap.go#L1475: s.nelems = uint16((nbytes - (nbytes / goarch.PtrSize / 8)) / s.elemsize)
mheap.go#L2178: scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
mheap.go#L2235: scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
mheap.go#L2593: scanblock(uintptr(unsafe.Pointer(&s.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
mheap.go#L2701: hash := memhash(abi.NoEscape(unsafe.Pointer(&p)), 0, goarch.PtrSize)
mheap.go#L2722: newNode = (*immortalWeakHandle)(persistentalloc(unsafe.Sizeof(immortalWeakHandle{}), goarch.PtrSize, &memstats.gcMiscSys))
mheap.go#L2736: m = &n.children[hashIter>>(8*goarch.PtrSize-1)]
mprof.go#L592: pc := *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
mranges.go#L258: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, sysStat))
mranges.go#L385: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, a.sysStat))
mranges.go#L455: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, b.sysStat))
mspanset.go#L113: newSpine := persistentalloc(newCap*goarch.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
mspanset.go#L117: memmove(newSpine, spine.p, b.spineCap*goarch.PtrSize)
mspanset.go#L308: return (*atomic.Pointer[spanSetBlock])(add(s.p, goarch.PtrSize*idx))
mwbbuf.go#L132: if b.next+goarch.PtrSize > b.end {
mwbbuf.go#L136: b.next += goarch.PtrSize
mwbbuf.go#L143: if b.next+2*goarch.PtrSize > b.end {
mwbbuf.go#L147: b.next += 2 * goarch.PtrSize
os_linux.go#L252: auxvp := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
panic.go#L896: return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true
panic.go#L1247: gp.sched.bp = fp - 2*goarch.PtrSize
panic.go#L1252: gp.sched.bp = sp - goarch.PtrSize
preempt.go#L323: asyncPreemptStack = uintptr(total) + 8*goarch.PtrSize
print.go#L273: for i := uintptr(0); p+i < end; i += goarch.PtrSize {
proc.go#L158: if goarch.PtrSize == 8 {
proc.go#L723: return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
proc.go#L2520: gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame
proc.go#L5197: totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
proc.go#L5207: *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
proc.go#L7668: p := add(firstFunc, uintptr(i)*goarch.PtrSize)
runtime1.go#L64: return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
runtime1.go#L199: if unsafe.Sizeof(k) != goarch.PtrSize {
runtime1.go#L202: if unsafe.Sizeof(l) != goarch.PtrSize {
runtime2.go#L522: tlsSize = tlsSlots * goarch.PtrSize
signal_amd64.go#L83: sp -= goarch.PtrSize
signal_linux_amd64.go#L55: *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
slice.go#L216: case et.Size_ == goarch.PtrSize:
slice.go#L217: lenmem = uintptr(oldLen) * goarch.PtrSize
slice.go#L218: newlenmem = uintptr(newLen) * goarch.PtrSize
slice.go#L219: capmem = roundupsize(uintptr(newcap)*goarch.PtrSize, noscan)
slice.go#L220: overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
slice.go#L221: newcap = int(capmem / goarch.PtrSize)
slice.go#L224: if goarch.PtrSize == 8 {
stack.go#L123: uintptrMask = 1<<(8*goarch.PtrSize) - 1
stack.go#L666: print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
stack.go#L673: pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
stack.go#L712: if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize {
stack.go#L737: size := uintptr(locals.n) * goarch.PtrSize
stack.go#L767: for i := uintptr(0); i < ptrBytes; i += goarch.PtrSize {
stack.go#L768: if *addb(gcData, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
stack.go#L795: if oldfp == gp.sched.sp-goarch.PtrSize {
stack.go#L796: memmove(unsafe.Pointer(gp.sched.bp), unsafe.Pointer(oldfp), goarch.PtrSize)
stack.go#L1090: sp -= goarch.PtrSize
stkframe.go#L79: return uintptr(argMap.n) * goarch.PtrSize
stkframe.go#L97: argMap.n = f.args / goarch.PtrSize
stkframe.go#L112: minSP -= goarch.PtrSize
stkframe.go#L137: retValid := *(*bool)(unsafe.Pointer(arg0 + 4*goarch.PtrSize))
stkframe.go#L146: n := int32((mv.argLen &^ (goarch.PtrSize - 1)) / goarch.PtrSize)
stkframe.go#L221: print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(args.n*goarch.PtrSize), "\n")
stkframe.go#L248: p = add(p, goarch.PtrSize)
symtab.go#L619: hdr.minLC != sys.PCQuantum || hdr.ptrSize != goarch.PtrSize || hdr.textStart != datap.text {
symtab.go#L1001: return (targetpc / goarch.PtrSize) % uintptr(len(pcvalueCache{}.entries))
symtab.go#L1204: if debugPcln && x&(goarch.PtrSize-1) != 0 {
sys_x86.go#L18: sp -= goarch.PtrSize
traceback.go#L182: frame.sp += goarch.PtrSize
traceback.go#L329: frame.fp += goarch.PtrSize
traceback.go#L377: lrPtr = frame.fp - goarch.PtrSize
traceback.go#L386: frame.varp -= goarch.PtrSize
traceback.go#L407: frame.varp -= goarch.PtrSize
traceback.go#L1315: const expand = 32 * goarch.PtrSize
traceback.go#L1316: const maxExpand = 256 * goarch.PtrSize
tracemap.go#L110: m = &n.children[hashIter>>(8*goarch.PtrSize-2)]
tracestack.go#L267: pcBuf[i] = *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
tracetype.go#L31: id, _ := t.tab.put(noescape(unsafe.Pointer(&typ)), goarch.PtrSize)
type.go#L134: bytes := goarch.PtrSize * divRoundUp(t.PtrBytes/goarch.PtrSize, 8*goarch.PtrSize)
type.go#L135: p = (*byte)(persistentalloc(bytes, goarch.PtrSize, &memstats.other_sys))
type.go#L214: dst.write(t.GCData, t.PtrBytes/goarch.PtrSize)
type.go#L231: dst = dst.offset(e.Size_ / goarch.PtrSize)
type.go#L248: buildGCMask(ft, dst.offset(f.Offset/goarch.PtrSize))
type.go#L253: dst = dst.offset(bigField.Offset / goarch.PtrSize)
unique
canonmap.go#L41: hashShift := 8 * goarch.PtrSize
canonmap.go#L68: hashShift = 8 * goarch.PtrSize
canonmap.go#L188: hashShift = 8 * goarch.PtrSize
canonmap.go#L234: if hashShift == 8*goarch.PtrSize {
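Many of the runtime references above (for example iface.go#L102, proc.go#L723, runtime1.go#L64) use PtrSize as the stride when indexing a raw block of pointer-sized words. A minimal sketch of that idiom using unsafe.Add; the helper and data below are illustrative and not taken from the runtime:

	package main

	import (
		"fmt"
		"unsafe"
	)

	const ptrSize = unsafe.Sizeof(uintptr(0)) // same value that goarch.PtrSize reports

	// wordAt returns the i'th pointer-sized word starting at base,
	// mirroring the runtime's add(p, i*goarch.PtrSize) indexing pattern.
	func wordAt(base unsafe.Pointer, i uintptr) uintptr {
		return *(*uintptr)(unsafe.Add(base, i*ptrSize))
	}

	func main() {
		words := []uintptr{10, 20, 30}
		fmt.Println(wordAt(unsafe.Pointer(&words[0]), 2)) // 30
	}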