package runtime

import (
	"internal/bytealg"
	"internal/goarch"
	"runtime/internal/atomic"
	"unsafe"
)

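// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).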
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	gp := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = gp.m.throwing > 0 || t&tracebackAll != 0
	if gp.m.traceback != 0 {
		level = int32(gp.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs.
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
}

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// The environment strings follow argv and its terminating nil
	// pointer, so scan forward from argv[argc+1] until nil.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}

func environ() []string {
	return envs
}

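// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.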
var test_z64, test_x64 uint64

// testAtomic64 verifies that the 64-bit atomic primitives work,
// since they need special handling on 32-bit platforms.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

// check performs sanity checks on some of the primitive types and
// runtime helpers that the rest of the runtime depends on.
func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k     unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t
	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != goarch.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != goarch.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}
	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}
	// All bits set is a NaN for both float sizes; NaN never compares
	// equal to anything, including itself.
	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}
	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if i == i {
		throw("float32nan1")
	}
	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if i == i1 {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}

type dbgVar struct {
	name  string
	value *int32
}

// debug holds the variables parsed from the GODEBUG environment
// variable, except for "memprofilerate", which is handled separately
// because it is a whole int, not an int32, and should be set only
// if specified in GODEBUG.
var debug struct {
	cgocheck           int32
	clobberfree        int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	madvdontneed       int32 // for Linux; issue 28466
	scavtrace          int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
	asyncpreemptoff    int32
	harddecommit       int32

	// debug.malloc is used as a combined debug check in the
	// malloc function and should be set if any of the below
	// debug options is != 0.
	malloc         bool
	allocfreetrace int32
	inittrace      int32
	sbrk           int32
}

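// dbgvars maps each recognized GODEBUG name to the debug field it sets.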
var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"clobberfree", &debug.clobberfree},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"madvdontneed", &debug.madvdontneed},
	{"sbrk", &debug.sbrk},
	{"scavtrace", &debug.scavtrace},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"tracebackancestors", &debug.tracebackancestors},
	{"asyncpreemptoff", &debug.asyncpreemptoff},
	{"inittrace", &debug.inittrace},
	{"harddecommit", &debug.harddecommit},
}

// parsedebugvars parses the GODEBUG and GOTRACEBACK environment variables.
func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1
	if GOOS == "linux" {
		// On Linux, MADV_FREE is faster than MADV_DONTNEED but
		// doesn't affect many of the statistics MADV_DONTNEED does
		// until the memory is actually reclaimed, which leads to
		// confusing numbers in monitoring tools. Hence, default to
		// MADV_DONTNEED.
		debug.madvdontneed = 1
	}

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := bytealg.IndexByteString(p, ',')
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = bytealg.IndexByteString(field, '=')
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it is int,
		// not int32, and should only be updated if specified
		// in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}

//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// When C owns the process, simply exiting on fatal errors and
	// panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}

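// Note: a numeric setting such as GOTRACEBACK=2 takes the default branch
// above and produces the same cache value as GOTRACEBACK=system,
// 2<<tracebackShift | tracebackAll.
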
// timediv is poor man's 64-bit division: it divides v by div and stores
// the remainder in *rem. int64 division is lowered into a runtime call
// on 32-bit platforms, which does not fit into nosplit functions, so
// this keeps us within the no-split stack limit there. On overflow it
// returns 0x7fffffff and sets *rem to 0.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			// Before this loop, res was 0, so these power-of-2
			// increments are just bit sets.
			res |= 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}

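// For example, timediv(12345*1000000000+54321, 1000000000, &rem) returns
// 12345 and sets rem to 54321, as exercised in check above.
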
//go:nosplit
func acquirem() *m {
	gp := getg()
	gp.m.locks++
	return gp.m
}

//go:nosplit
func releasem(mp *m) {
	gp := getg()
	mp.locks--
	if mp.locks == 0 && gp.preempt {
		// Restore the preemption request in case we've cleared it in newstack.
		gp.stackguard0 = stackPreempt
	}
}

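// acquirem and releasem bracket code that must not be preempted or
// migrated between Ms, e.g.:
//
//	mp := acquirem()
//	// ... work that must stay on this M ...
//	releasem(mp)
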
//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}

// reflect_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
}

// reflectlite_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1 // use negative offsets as IDs to aid debugging
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next--
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
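
// The IDs handed out above are negative, so they can never collide with
// real section offsets; reflect uses them (via reflect.addReflectOff) for
// types it constructs at run time, and negative offsets are resolved back
// through reflectOffs.m rather than through module data.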