package runtime
import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goos"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
const (
_StackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
_StackMin = 2048
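	// The lines below round _StackMin+_StackSystem up to the next power
	// of 2 by smearing the high bit into every lower bit and adding 1.
	// For example, when _StackSystem is 0 this yields
	// _FixedStack == _StackMin == 2048.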
_FixedStack0 = _StackMin + _StackSystem
_FixedStack1 = _FixedStack0 - 1
_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
_FixedStack = _FixedStack6 + 1
_StackBig = 4096
_StackGuard = 928*sys.StackGuardMultiplier + _StackSystem
_StackSmall = 128
_StackLimit = _StackGuard - _StackSystem - _StackSmall
)
const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)
const (
	uintptrMask = 1<<(8*goarch.PtrSize) - 1

	// The values below can be stored to g.stackguard0 to force
	// the next stack check to fail. They are all larger than any real SP.

	stackPreempt   = uintptrMask & -1314 // goroutine preemption request; 0xfffffade in hex
	stackFork      = uintptrMask & -1234 // thread is forking; 0xfffffb2e in hex
	stackForceMove = uintptrMask & -275  // force a stack movement, used for debugging; 0xfffffeed in hex
	stackPoisonMin = uintptrMask & -4096 // the lowest allowed stack poison value
)
var stackpool [_NumStackOrders]struct {
item stackpoolItem
_ [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
}
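//go:notinheap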
type stackpoolItem struct {
mu mutex
span mSpanList
}
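// Global pool of large stack spans.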
var stackLarge struct {
lock mutex
free [heapAddrBits - pageShift]mSpanList
}
func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
		lockInit(&stackpool[i].item.mu, lockRankStackpool)
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
		lockInit(&stackLarge.lock, lockRankStackLarge)
	}
}
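// stacklog2 returns ⌊log_2(n)⌋.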
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
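// Allocates a stack from the free pool. Must be called with
// stackpool[order].item.mu held.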
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// No free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// All stacks in s are allocated.
		list.remove(s)
	}
	return x
}
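// Adds stack x to the free pool. Must be called with
// stackpool[order].item.mu held.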
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack.
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}
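// stackcacherefill/stackcacherelease implement a global pool
// of stack segments. The pool is required to prevent
// unlimited growth of per-thread caches.
//
//go:systemstack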
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}
	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}
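//go:systemstack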
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}
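//go:systemstack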
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}
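// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack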
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if asanenabled {
		asanunpoison(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
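// stackfree frees an n byte stack allocated by stackalloc.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack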
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if asanenabled {
		asanpoison(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return a stack span to
			// the heap because it could mark a still-live stack.
			// Add it to the large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}
var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
var maxstackceiling = maxstacksize
var ptrnames = []string{
0: "scalar",
1: "ptr",
}
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}
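// adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.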
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
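// Information from the compiler about the layout of stack frames.
// Note: this type must agree with reflect.bitVector.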
type bitvector struct {
n int32
bytedata *uint8
}
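// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of a bitvector.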
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}
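// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.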
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers.
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print("        ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.Ctz8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}
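// Note: the argument/return area is adjusted by the callee.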
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	if frame.continpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.funcID == funcID_systemstack_switch {
		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}

	locals, args, objs := getStackMap(frame, &adjinfo.cache, true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if goarch.ArchFamily == goarch.AMD64 && frame.argp-frame.varp == 2*goarch.PtrSize {
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
	if frame.varp != 0 {
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack bounds check fails and
				// we call into morestack.)
				continue
			}
			ptrdata := obj.ptrdata()
			gcdata := obj.gcdata()
			var s *mspan
			if obj.useGCProg() {
				// See comments in mgcmark.go:scanstack
				s = materializeGCProg(ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
				if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}

	return true
}
func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}
func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
	// We need to do this first because we need to adjust the
	// defer.link fields so we always work on the new stack.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
	}
}
func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}
func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// The data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}
func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}
func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}
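// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.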
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			// There is a ranking cycle here between gscan bit and
			// hchan locks. Normally, we only allow acquiring hchan
			// locks and then getting a gscan bit. In this case, we
			// already have the gscan bit. We allow acquiring hchan
			// locks here as a special case, since a deadlock can't
			// occur because G's can't be sending to themselves.
			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}
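// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.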
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp
	// Add just the difference to the scannable-stack accounting.
	// g0 stacks never move, so this will never account for them.
	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))

	// Allocate new stack.
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && atomic.Load8(&gp.parkingOnChan) != 0 {
			// It's not safe for someone to shrink this stack while we're actively
			// parking on a channel, but it is safe to grow since we do that
			// ourselves and explicitly don't want to synchronize with channels
			// since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully.
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one.
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// Free old stack.
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}
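// round x up to a power of 2.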
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
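// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec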
func newstack() {
	thisg := getg()
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry()
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // Include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	preempt := stackguard0 == stackPreempt
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
		// The call to morestack cost a word.
		sp -= goarch.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so
			// do the pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		needed := max + _StackGuard
		used := gp.stack.hi - gp.sched.sp
		for newsize-used < needed {
			newsize *= 2
		}
	}

	if stackguard0 == stackForceMove {
		// Forced stack movement used for debugging.
		// Don't double the stack (or we may quickly run out
		// if this is done repeatedly).
		newsize = oldsize
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}
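//go:nosplit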
func nilfunc() {
	*(*uint8)(nil) = 0
}
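// adjust Gobuf as if it executed a call to fn
// and then stopped before the first instruction in fn.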
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}
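// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack.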
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall, at an
	// asynchronous safe point, or in the window between parking
	// on a channel and setting gp.activeStackChans.
	return gp.syscallsp == 0 && !gp.asyncSafePoint && atomic.Load8(&gp.parkingOnChan) == 0
}
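// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack. It may be in
// _Grunning, but only if this is our own user G.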
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Check for self-shrinks while in a libcall. These may have
	// pointers into the stack disguised as uintptrs, but these
	// code paths should all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}
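// freeStackSpans frees unused stack spans at the end of GC.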
func freeStackSpans() {
	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}
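// getStackMap returns the locals and arguments live pointer maps, and
// stack object list for frame.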
func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead. Return empty bitvectors.
		return
	}

	f := frame.fn
	pcdata := int32(-1)
	if targetpc != f.entry() {
		// Back up to the CALL. If we're at the function entry
		// point, we want to use the entry map (-1), even if
		// the first instruction of the function changes the
		// stack map.
		targetpc--
		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Local variables.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch goarch.ArchFamily {
	case goarch.ARM64:
		minsize = sys.StackAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		stackid := pcdata
		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// If nbit == 0, there's no work to do.
		if stkmap.nbit > 0 {
			if stackid < 0 || stackid >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			locals = stackmapdata(stkmap, stackid)
			if stackDebug >= 3 && debug {
				print("      locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
			}
		} else if stackDebug >= 3 && debug {
			print("      no locals to adjust\n")
		}
	}

	// Arguments.
	if frame.arglen > 0 {
		if frame.argmap != nil {
			// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
			// In this case, arglen specifies how much of the args section is actually live.
			// (It could be either all the args + results, or just the args.)
			args = *frame.argmap
			n := int32(frame.arglen / goarch.PtrSize)
			if n < args.n {
				args.n = n // Don't use more of the arguments than arglen.
			}
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			if stackmap.nbit > 0 {
				args = stackmapdata(stackmap, pcdata)
			}
		}
	}

	// Stack objects.
	if (GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "ppc64" || GOARCH == "ppc64le") && unsafe.Sizeof(abi.RegArgs{}) > 0 && frame.argmap != nil {
		// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
		// We don't actually use argmap in this case, but we need to fake the stack object
		// record for these frames which contain an internal/abi.RegArgs at a hard-coded offset.
		objs = methodValueCallFrameObjs[:]
	} else {
		p := funcdata(f, _FUNCDATA_StackObjects)
		if p != nil {
			n := *(*uintptr)(p)
			p = add(p, goarch.PtrSize)
			*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
			// Note: the noescape above is needed to keep
			// getStackMap from "leaking param content: frame".
		}
	}

	return
}
var methodValueCallFrameObjs [1]stackObjectRecord // initialized in stkobjinit
func stkobjinit() {
	var abiRegArgsEface any = abi.RegArgs{}
	abiRegArgsType := efaceOf(&abiRegArgsEface)._type
	if abiRegArgsType.kind&kindGCProg != 0 {
		throw("abiRegArgsType needs GC Prog, update methodValueCallFrameObjs")
	}
	// Set methodValueCallFrameObjs[0].gcdataoff so that
	// stackObjectRecord.gcdata() will work correctly with it.
	ptr := uintptr(unsafe.Pointer(&methodValueCallFrameObjs[0]))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.gofunc <= ptr && ptr < datap.end {
			mod = datap
			break
		}
	}
	if mod == nil {
		throw("methodValueCallFrameObjs is not in a module")
	}
	methodValueCallFrameObjs[0] = stackObjectRecord{
		off:       -int32(alignUp(abiRegArgsType.size, 8)), // It's always the highest address local.
		size:      int32(abiRegArgsType.size),
		_ptrdata:  int32(abiRegArgsType.ptrdata),
		gcdataoff: uint32(uintptr(unsafe.Pointer(abiRegArgsType.gcdata)) - mod.rodata),
	}
}
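// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects.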
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off       int32
	size      int32
	_ptrdata  int32  // ptrdata, or -ptrdata if GC prog is used
	gcdataoff uint32 // offset to gcdata from moduledata.rodata
}
func (r *stackObjectRecord) useGCProg() bool {
	return r._ptrdata < 0
}
func (r *stackObjectRecord) ptrdata() uintptr {
	x := r._ptrdata
	if x < 0 {
		return uintptr(-x)
	}
	return uintptr(x)
}
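// gcdata returns the pointer map or GC prog of the type.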
func (r *stackObjectRecord) gcdata() *byte {
	ptr := uintptr(unsafe.Pointer(r))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.gofunc <= ptr && ptr < datap.end {
			mod = datap
			break
		}
	}
	// The stack object record must live in some module,
	// so mod is non-nil here.
	res := mod.rodata + uintptr(r.gcdataoff)
	return (*byte)(unsafe.Pointer(res))
}
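// This is exported as ABI0 via linkname so obj can call it.
//
//go:nosplit
//go:linkname morestackc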
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}