internal/abi.Type.Size_ (field)

151 uses

	internal/abi (current package)
		type.go#L21: 	Size_       uintptr
		type.go#L446: func (t *Type) Size() uintptr { return t.Size_ }

	internal/runtime/maps
		runtime_swiss.go#L51: 		msan.Read(key, typ.Key.Size_)
		runtime_swiss.go#L54: 		asan.Read(key, typ.Key.Size_)
		runtime_swiss.go#L125: 		msan.Read(key, typ.Key.Size_)
		runtime_swiss.go#L128: 		asan.Read(key, typ.Key.Size_)
		runtime_swiss.go#L202: 		msan.Read(key, typ.Key.Size_)
		runtime_swiss.go#L205: 		asan.Read(key, typ.Key.Size_)
		table_debug.go#L51: 					dump(key, typ.Key.Size_)
		table_debug.go#L114: 			dump(g.key(typ, j), typ.Key.Size_)
		table_debug.go#L117: 			dump(g.elem(typ, j), typ.Elem.Size_)

	reflect
		map_swiss.go#L86: 	if ktyp.Size_ > abi.SwissMapMaxKeyBytes {
		map_swiss.go#L89: 	if etyp.Size_ > abi.SwissMapMaxElemBytes {
		type.go#L587: 	return int(t.t.Size_) * 8
		type.go#L1763: 	if typ.Size_ >= 1<<16 {
		type.go#L2046: 	words := typ.Size_ / goarch.PtrSize
		type.go#L2346: 		size = offset + ft.Size_
		type.go#L2352: 		if ft.Size_ == 0 {
		type.go#L2474: 	typ.Size_ = size
		type.go#L2618: 	if typ.Size_ > 0 {
		type.go#L2619: 		max := ^uintptr(0) / typ.Size_
		type.go#L2624: 	array.Size_ = typ.Size_ * uintptr(length)
		type.go#L2626: 		array.PtrBytes = typ.Size_*uintptr(length-1) + typ.PtrBytes
		type.go#L2768: 		Size_:    align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize),
		type.go#L2841: 			addTypeBits(bv, offset+uintptr(i)*tt.Elem.Size_, tt.Elem)

	runtime
		alg.go#L204: 		switch t.Size_ {
		alg.go#L210: 			return memhash(p, h, t.Size_)
		alg.go#L233: 			h = typehash(a.Elem, add(p, i*a.Elem.Size_), h)
		alg.go#L298: 			if err := mapKeyError2(a.Elem, add(p, i*a.Elem.Size_)); err != nil {
		arena.go#L450: 	size := typ.Size_
		arena.go#L516: 			c.scanAlloc += size - (typ.Size_ - typ.PtrBytes)
		arena.go#L540: 	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
		arena.go#L545: 		userArenaHeapBitsSetType(typ, add(ptr, uintptr(i)*typ.Size_), s)
		arena.go#L579: 	h = h.pad(s, typ.Size_-typ.PtrBytes)
		arena.go#L580: 	h.flush(s, uintptr(ptr), typ.Size_)
		arena.go#L589: 		doubleCheckHeapPointersInterior(uintptr(ptr), uintptr(ptr), typ.Size_, typ.Size_, typ, &s.largeType, s)
		arena.go#L1118: 	s.largeType.Size_ = s.elemsize
		cgocall.go#L622: 			p = add(p, at.Elem.Size_)
		cgocall.go#L671: 			p = add(p, st.Elem.Size_)
		cgocheck.go#L80: 	cgoCheckMemmove2(typ, dst, src, 0, typ.Size_)
		cgocheck.go#L124: 		cgoCheckTypedBlock(typ, p, 0, typ.Size_)
		cgocheck.go#L125: 		p = add(p, typ.Size_)
		chan.go#L79: 	if elem.Size_ >= 1<<16 {
		chan.go#L86: 	mem, overflow := math.MulUintptr(elem.Size_, uintptr(size))
		chan.go#L113: 	c.elemsize = uint16(elem.Size_)
		chan.go#L122: 		print("makechan: chan=", c, "; elemsize=", elem.Size_, "; dataqsiz=", size, "\n")
		chan.go#L399: 	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_)
		chan.go#L402: 	memmove(dst, src, t.Size_)
		chan.go#L410: 	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_)
		chan.go#L411: 	memmove(dst, src, t.Size_)
		checkptr.go#L25: 	if checkptrStraddles(p, n*elem.Size_) {
		heapdump.go#L196: 	dumpint(uint64(t.Size_))
		iface.go#L339: 		msanread(v, t.Size_)
		iface.go#L342: 		asanread(v, t.Size_)
		iface.go#L344: 	x := mallocgc(t.Size_, t, true)
		iface.go#L354: 		msanread(v, t.Size_)
		iface.go#L357: 		asanread(v, t.Size_)
		iface.go#L360: 	x := mallocgc(t.Size_, t, false)
		iface.go#L361: 	memmove(x, v, t.Size_)
		malloc.go#L1714: 	return mallocgc(typ.Size_, typ, true)
		malloc.go#L1735: 	return mallocgc(typ.Size_, typ, true)
		malloc.go#L1740: 	return mallocgc(typ.Size_, typ, true)
		malloc.go#L1758: 		return mallocgc(typ.Size_, typ, true)
		malloc.go#L1760: 	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
		map_swiss.go#L152: 		msanread(key, t.Key.Size_)
		map_swiss.go#L155: 		asanread(key, t.Key.Size_)
		mbarrier.go#L167: 	memmove(dst, src, typ.Size_)
		mbarrier.go#L169: 		cgoCheckMemmove2(typ, dst, src, 0, typ.Size_)
		mbarrier.go#L219: 		msanwrite(dst, typ.Size_)
		mbarrier.go#L220: 		msanread(src, typ.Size_)
		mbarrier.go#L223: 		asanwrite(dst, typ.Size_)
		mbarrier.go#L224: 		asanread(src, typ.Size_)
		mbarrier.go#L292: 		racewriterangepc(dstPtr, uintptr(n)*typ.Size_, callerpc, pc)
		mbarrier.go#L293: 		racereadrangepc(srcPtr, uintptr(n)*typ.Size_, callerpc, pc)
		mbarrier.go#L296: 		msanwrite(dstPtr, uintptr(n)*typ.Size_)
		mbarrier.go#L297: 		msanread(srcPtr, uintptr(n)*typ.Size_)
		mbarrier.go#L300: 		asanwrite(dstPtr, uintptr(n)*typ.Size_)
		mbarrier.go#L301: 		asanread(srcPtr, uintptr(n)*typ.Size_)
		mbarrier.go#L316: 	size := uintptr(n) * typ.Size_
		mbarrier.go#L321: 		pwsize := size - typ.Size_ + typ.PtrBytes
		mbarrier.go#L345: 		return slicecopy(dst.array, dst.len, src.array, src.len, elemType.Size_)
		mbarrier.go#L368: 	memclrNoHeapPointers(ptr, typ.Size_)
		mbarrier.go#L403: 	size := typ.Size_ * uintptr(len)
		mbitmap.go#L285: 			tp.elem += tp.typ.Size_
		mbitmap.go#L332: 	if n >= tp.typ.Size_ {
		mbitmap.go#L336: 		tp.elem += (tp.addr - tp.elem + n) / tp.typ.Size_ * tp.typ.Size_
		mbitmap.go#L345: 		tp.elem += tp.typ.Size_
		mbitmap.go#L648: 	if typ.Size_ == goarch.PtrSize {
		mbitmap.go#L654: 		if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
		mbitmap.go#L657: 		for i := typ.Size_; i < dataSize; i += typ.Size_ {
		mbitmap.go#L659: 			scanSize += typ.Size_
		mbitmap.go#L694: 			print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n")
		mbitmap.go#L772: 	size = (size + gctyp.Size_ - 1) / gctyp.Size_ * gctyp.Size_
		mbitmap.go#L791: 			off := i % typ.Size_
		mbitmap.go#L817: 	print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, " TFlagGCMaskOnDemand=", typ.TFlag&abi.TFlagGCMaskOnDemand != 0, "\n")
		mbitmap.go#L848: 			off := i % typ.Size_
		mbitmap.go#L875: 	print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, "\n")
		mbitmap.go#L896: 			off := i % typ.Size_
		mbitmap.go#L1376: 	if typ.Size_ != size {
		mbitmap.go#L1377: 		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
		mbitmap.go#L1741: 			n := et.Size_
		mbitmap.go#L1753: 			n := et.Size_
		mbitmap.go#L1871: 			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
		mfinal.go#L475: 		if ot.Elem == nil || ot.Elem.Pointers() || ot.Elem.Size_ >= maxTinySize {
		mfinal.go#L526: 		nret = alignUp(nret, uintptr(t.Align_)) + t.Size_
		select.go#L430: 			msanread(cas.elem, c.elemtype.Size_)
		select.go#L432: 			msanwrite(cas.elem, c.elemtype.Size_)
		select.go#L437: 			asanread(cas.elem, c.elemtype.Size_)
		select.go#L439: 			asanwrite(cas.elem, c.elemtype.Size_)
		select.go#L455: 		msanwrite(cas.elem, c.elemtype.Size_)
		select.go#L458: 		asanwrite(cas.elem, c.elemtype.Size_)
		select.go#L481: 		msanread(cas.elem, c.elemtype.Size_)
		select.go#L484: 		asanread(cas.elem, c.elemtype.Size_)
		select.go#L522: 		msanread(cas.elem, c.elemtype.Size_)
		select.go#L525: 		asanread(cas.elem, c.elemtype.Size_)
		slice.go#L42: 		tomem, overflow = math.MulUintptr(et.Size_, uintptr(tolen))
		slice.go#L46: 		copymem = et.Size_ * uintptr(fromlen)
		slice.go#L51: 		tomem = et.Size_ * uintptr(tolen)
		slice.go#L102: 	mem, overflow := math.MulUintptr(et.Size_, uintptr(cap))
		slice.go#L109: 		mem, overflow := math.MulUintptr(et.Size_, uintptr(len))
		slice.go#L181: 		racereadrangepc(oldPtr, uintptr(oldLen*int(et.Size_)), callerpc, abi.FuncPCABIInternal(growslice))
		slice.go#L184: 		msanread(oldPtr, uintptr(oldLen*int(et.Size_)))
		slice.go#L187: 		asanread(oldPtr, uintptr(oldLen*int(et.Size_)))
		slice.go#L194: 	if et.Size_ == 0 {
		slice.go#L210: 	case et.Size_ == 1:
		slice.go#L216: 	case et.Size_ == goarch.PtrSize:
		slice.go#L222: 	case isPowerOfTwo(et.Size_):
		slice.go#L226: 			shift = uintptr(sys.TrailingZeros64(uint64(et.Size_))) & 63
		slice.go#L228: 			shift = uintptr(sys.TrailingZeros32(uint32(et.Size_))) & 31
		slice.go#L237: 		lenmem = uintptr(oldLen) * et.Size_
		slice.go#L238: 		newlenmem = uintptr(newLen) * et.Size_
		slice.go#L239: 		capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap))
		slice.go#L241: 		newcap = int(capmem / et.Size_)
		slice.go#L242: 		capmem = uintptr(newcap) * et.Size_
		slice.go#L280: 			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes, et)
		slice.go#L342: 		oldcapmem := uintptr(old.cap) * et.Size_
		slice.go#L343: 		newlenmem := uintptr(new.len) * et.Size_
		stkframe.go#L281: 		off:       -int32(alignUp(abiRegArgsType.Size_, 8)), // It's always the highest address local.
		stkframe.go#L282: 		size:      int32(abiRegArgsType.Size_),
		type.go#L226: 			dst = dst.offset(e.Size_ / goarch.PtrSize)
		type.go#L236: 			if ft.Size_ > t.Size_/2 {
		unsafe.go#L59: 	if et.Size_ == 0 {
		unsafe.go#L65: 	mem, overflow := math.MulUintptr(et.Size_, uintptr(len))
		unsafe.go#L88: 	if checkptrStraddles(ptr, uintptr(len64)*et.Size_) {