// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package reflect implements run-time reflection, allowing a program to
// manipulate objects with arbitrary types. The typical use is to take a value
// with static type interface{} and extract its dynamic type information by
// calling TypeOf, which returns a Type.
//
// A call to ValueOf returns a Value representing the run-time data.
// Zero takes a Type and returns a Value representing a zero value
// for that type.
//
// See "The Laws of Reflection" for an introduction to reflection in Go:
// https://golang.org/doc/articles/laws_of_reflection.html
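
// For illustration, the typical use described in the package comment looks
// like this from client code (a minimal, self-contained sketch; the values
// and the printed output are only an example):
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	func main() {
//		var x float64 = 3.4
//		t := reflect.TypeOf(x)  // a reflect.Type describing float64
//		v := reflect.ValueOf(x) // a reflect.Value holding 3.4
//		z := reflect.Zero(t)    // a reflect.Value holding float64's zero value
//		fmt.Println(t.Kind(), v.Float(), z.Float()) // prints: float64 3.4 0
//	}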
package reflect

import (
	"internal/goarch"
	"internal/unsafeheader"
	"strconv"
	"sync"
	"unicode"
	"unicode/utf8"
	"unsafe"
)

// Type is the representation of a Go type.
//
// Not all methods apply to all kinds of types. Restrictions,
// if any, are noted in the documentation for each method.
// Use the Kind method to find out the kind of type before
// calling kind-specific methods. Calling a method
// inappropriate to the kind of type causes a run-time panic.
//
// Type values are comparable, such as with the == operator,
// so they can be used as map keys.
// Two Type values are equal if they represent identical types.
type Type interface {
	// Methods applicable to all types.

	// Align returns the alignment in bytes of a value of
	// this type when allocated in memory.
	Align() int

	// FieldAlign returns the alignment in bytes of a value of
	// this type when used as a field in a struct.
	FieldAlign() int

	// Method returns the i'th method in the type's method set.
	// It panics if i is not in the range [0, NumMethod()).
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver,
	// and only exported methods are accessible.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	//
	// Methods are sorted in lexicographic order.
	Method(int) Method

	// MethodByName returns the method with that name in the type's
	// method set and a boolean indicating if the method was found.
	//
	// For a non-interface type T or *T, the returned Method's Type and Func
	// fields describe a function whose first argument is the receiver.
	//
	// For an interface type, the returned Method's Type field gives the
	// method signature, without a receiver, and the Func field is nil.
	MethodByName(string) (Method, bool)

	// NumMethod returns the number of methods accessible using Method.
	//
	// Note that NumMethod counts unexported methods only for interface types.
	NumMethod() int

	// Name returns the type's name within its package for a defined type.
	// For other (non-defined) types it returns the empty string.
	Name() string

	// PkgPath returns a defined type's package path, that is, the import path
	// that uniquely identifies the package, such as "encoding/base64".
	// If the type was predeclared (string, error) or not defined (*T, struct{},
	// []int, or A where A is an alias for a non-defined type), the package path
	// will be the empty string.
	PkgPath() string

	// Size returns the number of bytes needed to store
	// a value of the given type; it is analogous to unsafe.Sizeof.
	Size() uintptr

	// String returns a string representation of the type.
	// The string representation may use shortened package names
	// (e.g., base64 instead of "encoding/base64") and is not
	// guaranteed to be unique among types. To test for type identity,
	// compare the Types directly.
	String() string

	// Kind returns the specific kind of this type.
	Kind() Kind

	// Implements reports whether the type implements the interface type u.
	Implements(u Type) bool

	// AssignableTo reports whether a value of the type is assignable to type u.
	AssignableTo(u Type) bool

	// ConvertibleTo reports whether a value of the type is convertible to type u.
	// Even if ConvertibleTo returns true, the conversion may still panic.
	// For example, a slice of type []T is convertible to *[N]T,
	// but the conversion will panic if its length is less than N.
	ConvertibleTo(u Type) bool

	// Comparable reports whether values of this type are comparable.
	// Even if Comparable returns true, the comparison may still panic.
	// For example, values of interface type are comparable,
	// but the comparison will panic if their dynamic type is not comparable.
	Comparable() bool

	// Methods applicable only to some types, depending on Kind.
	// The methods allowed for each kind are:
	//
	//	Int*, Uint*, Float*, Complex*: Bits
	//	Array: Elem, Len
	//	Chan: ChanDir, Elem
	//	Func: In, NumIn, Out, NumOut, IsVariadic.
	//	Map: Key, Elem
	//	Pointer: Elem
	//	Slice: Elem
	//	Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField

	// Bits returns the size of the type in bits.
	// It panics if the type's Kind is not one of the
	// sized or unsized Int, Uint, Float, or Complex kinds.
	Bits() int

	// ChanDir returns a channel type's direction.
	// It panics if the type's Kind is not Chan.
	ChanDir() ChanDir

	// IsVariadic reports whether a function type's final input parameter
	// is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
	// implicit actual type []T.
	//
	// For concreteness, if t represents func(x int, y ...float64), then
	//
	//	t.NumIn() == 2
	//	t.In(0) is the reflect.Type for "int"
	//	t.In(1) is the reflect.Type for "[]float64"
	//	t.IsVariadic() == true
	//
	// IsVariadic panics if the type's Kind is not Func.
	IsVariadic() bool

	// Elem returns a type's element type.
	// It panics if the type's Kind is not Array, Chan, Map, Pointer, or Slice.
	Elem() Type

	// Field returns a struct type's i'th field.
	// It panics if the type's Kind is not Struct.
	// It panics if i is not in the range [0, NumField()).
	Field(i int) StructField

	// FieldByIndex returns the nested field corresponding
	// to the index sequence. It is equivalent to calling Field
	// successively for each index i.
	// It panics if the type's Kind is not Struct.
	FieldByIndex(index []int) StructField

	// FieldByName returns the struct field with the given name
	// and a boolean indicating if the field was found.
	FieldByName(name string) (StructField, bool)

	// FieldByNameFunc returns the struct field with a name
	// that satisfies the match function and a boolean indicating if
	// the field was found.
	//
	// FieldByNameFunc considers the fields in the struct itself
	// and then the fields in any embedded structs, in breadth first order,
	// stopping at the shallowest nesting depth containing one or more
	// fields satisfying the match function. If multiple fields at that depth
	// satisfy the match function, they cancel each other
	// and FieldByNameFunc returns no match.
	// This behavior mirrors Go's handling of name lookup in
	// structs containing embedded fields.
	FieldByNameFunc(match func(string) bool) (StructField, bool)

	// In returns the type of a function type's i'th input parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumIn()).
	In(i int) Type

	// Key returns a map type's key type.
	// It panics if the type's Kind is not Map.
	Key() Type

	// Len returns an array type's length.
	// It panics if the type's Kind is not Array.
	Len() int

	// NumField returns a struct type's field count.
	// It panics if the type's Kind is not Struct.
	NumField() int

	// NumIn returns a function type's input parameter count.
	// It panics if the type's Kind is not Func.
	NumIn() int

	// NumOut returns a function type's output parameter count.
	// It panics if the type's Kind is not Func.
	NumOut() int

	// Out returns the type of a function type's i'th output parameter.
	// It panics if the type's Kind is not Func.
	// It panics if i is not in the range [0, NumOut()).
	Out(i int) Type

	common() *rtype
	uncommon() *uncommonType
}
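
// For illustration, client code is expected to consult Kind before calling
// the kind-specific methods above, since calling a method inappropriate to
// the kind panics. A minimal, self-contained sketch (the helper fieldSummary
// and the user type are hypothetical, not part of this package's API):
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	// fieldSummary lists the fields of a struct type; for any other kind
//	// it returns nil instead of letting NumField panic.
//	func fieldSummary(t reflect.Type) []string {
//		if t.Kind() != reflect.Struct {
//			return nil
//		}
//		out := make([]string, 0, t.NumField())
//		for i := 0; i < t.NumField(); i++ {
//			f := t.Field(i)
//			out = append(out, fmt.Sprintf("%s %s %q", f.Name, f.Type, f.Tag))
//		}
//		return out
//	}
//
//	func main() {
//		type user struct {
//			Name string `json:"name"`
//			Age  int    `json:"age"`
//		}
//		for _, line := range fieldSummary(reflect.TypeOf(user{})) {
//			fmt.Println(line)
//		}
//	}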

// BUG(rsc): FieldByName and related functions consider struct field names to be equal
// if the names are equal, even if they are unexported names originating
// in different packages. The practical effect of this is that the result of
// t.FieldByName("x") is not well defined if the struct type t contains
// multiple fields named x (embedded from different packages).
// FieldByName may return one of the fields named x or may report that there are none.
// See https://golang.org/issue/4876 for more details.

/*
 * These data structures are known to the compiler (../cmd/compile/internal/reflectdata/reflect.go).
 * A few are known to ../runtime/type.go to convey to debuggers.
 * They are also known to ../runtime/type.go.
 */

// A Kind represents the specific kind of type that a Type represents.
// The zero Kind is not a valid kind.
type Kind uint

const (
	Invalid Kind = iota
	Bool
	Int
	Int8
	Int16
	Int32
	Int64
	Uint
	Uint8
	Uint16
	Uint32
	Uint64
	Uintptr
	Float32
	Float64
	Complex64
	Complex128
	Array
	Chan
	Func
	Interface
	Map
	Pointer
	Slice
	String
	Struct
	UnsafePointer
)

// Ptr is the old name for the Pointer kind.
const Ptr = Pointer

// tflag is used by an rtype to signal what extra type information is
// available in the memory directly following the rtype value.
//
// tflag values must be kept in sync with copies in:
//
//	cmd/compile/internal/reflectdata/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	runtime/type.go
type tflag uint8

const (
	// tflagUncommon means that there is a pointer, *uncommonType,
	// just beyond the outer type structure.
	//
	// For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0,
	// then t has uncommonType data and it can be accessed as:
	//
	//	type tUncommon struct {
	//		structType
	//		u uncommonType
	//	}
	//	u := &(*tUncommon)(unsafe.Pointer(t)).u
	tflagUncommon tflag = 1 << 0

	// tflagExtraStar means the name in the str field has an
	// extraneous '*' prefix. This is because for most types T in
	// a program, the type *T also exists and reusing the str data
	// saves binary size.
	tflagExtraStar tflag = 1 << 1

	// tflagNamed means the type has a name.
	tflagNamed tflag = 1 << 2

	// tflagRegularMemory means that equal and hash functions can treat
	// this type as a single region of t.size bytes.
	tflagRegularMemory tflag = 1 << 3
)

// rtype is the common implementation of most values.
// It is embedded in other struct types.
//
// rtype must be kept in sync with ../runtime/type.go:/^type._type.
type rtype struct {
	size       uintptr
	ptrdata    uintptr // number of bytes in the type that can contain pointers
	hash       uint32  // hash of type; avoids computation in hash tables
	tflag      tflag   // extra type information flags
	align      uint8   // alignment of variable with this type
	fieldAlign uint8   // alignment of struct field with this type
	kind       uint8   // enumeration for C
	// function for comparing objects of this type
	// (ptr to object A, ptr to object B) -> ==?
	equal     func(unsafe.Pointer, unsafe.Pointer) bool
	gcdata    *byte   // garbage collection data
	str       nameOff // string form
	ptrToThis typeOff // type for pointer to this type, may be zero
}

// Method on non-interface type
type method struct {
	name nameOff // name of method
	mtyp typeOff // method type (without receiver)
	ifn  textOff // fn used in interface call (one-word receiver)
	tfn  textOff // fn used for normal method call
}
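
// For illustration, code later in this file reaches kind-specific type data
// by checking Kind and then reinterpreting the *rtype as the larger,
// kind-specific struct declared below (chanType, structType, and so on),
// whose first field is the embedded rtype. A minimal sketch of that pattern
// (chanElem is a hypothetical helper, not part of this file):
//
//	func chanElem(t *rtype) *rtype {
//		if t.Kind() != Chan {
//			panic("reflect: chanElem of non-chan type " + t.String())
//		}
//		tt := (*chanType)(unsafe.Pointer(t)) // rtype is chanType's first field
//		return tt.elem
//	}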

// uncommonType is present only for defined types or types with methods
// (if T is a defined type, the uncommonTypes for T and *T have methods).
// Using a pointer to this struct reduces the overall size required
// to describe a non-defined type with no methods.
type uncommonType struct {
	pkgPath nameOff // import path; empty for built-in types like int, string
	mcount  uint16  // number of methods
	xcount  uint16  // number of exported methods
	moff    uint32  // offset from this uncommontype to [mcount]method
	_       uint32  // unused
}

// ChanDir represents a channel type's direction.
type ChanDir int

const (
	RecvDir ChanDir             = 1 << iota // <-chan
	SendDir                                 // chan<-
	BothDir = RecvDir | SendDir             // chan
)

// arrayType represents a fixed array type.
type arrayType struct {
	rtype
	elem  *rtype // array element type
	slice *rtype // slice type
	len   uintptr
}

// chanType represents a channel type.
type chanType struct {
	rtype
	elem *rtype  // channel element type
	dir  uintptr // channel direction (ChanDir)
}

// funcType represents a function type.
//
// A *rtype for each in and out parameter is stored in an array that
// directly follows the funcType (and possibly its uncommonType). So
// a function type with one method, one input, and one output is:
//
//	struct {
//		funcType
//		uncommonType
//		[2]*rtype    // [0] is in, [1] is out
//	}
type funcType struct {
	rtype
	inCount  uint16
	outCount uint16 // top bit is set if last input parameter is ...
}

// imethod represents a method on an interface type
type imethod struct {
	name nameOff // name of method
	typ  typeOff // .(*FuncType) underneath
}

// interfaceType represents an interface type.
type interfaceType struct {
	rtype
	pkgPath name      // import path
	methods []imethod // sorted by hash
}

// mapType represents a map type.
type mapType struct {
	rtype
	key    *rtype // map key type
	elem   *rtype // map element (value) type
	bucket *rtype // internal bucket structure
	// function for hashing keys (ptr to key, seed) -> hash
	hasher     func(unsafe.Pointer, uintptr) uintptr
	keysize    uint8  // size of key slot
	valuesize  uint8  // size of value slot
	bucketsize uint16 // size of bucket
	flags      uint32
}

// ptrType represents a pointer type.
type ptrType struct {
	rtype
	elem *rtype // pointer element (pointed at) type
}

// sliceType represents a slice type.
type sliceType struct {
	rtype
	elem *rtype // slice element type
}

// Struct field
type structField struct {
	name        name    // name is always non-empty
	typ         *rtype  // type of field
	offsetEmbed uintptr // byte offset of field<<1 | isEmbedded
}

func (f *structField) offset() uintptr {
	return f.offsetEmbed >> 1
}

func (f *structField) embedded() bool {
	return f.offsetEmbed&1 != 0
}

// structType represents a struct type.
type structType struct {
	rtype
	pkgPath name
	fields  []structField // sorted by offset
}

// name is an encoded type name with optional extra data.
//
// The first byte is a bit field containing:
//
//	1<<0 the name is exported
//	1<<1 tag data follows the name
//	1<<2 pkgPath nameOff follows the name and tag
//
// Following that, there is a varint-encoded length of the name,
// followed by the name itself.
//
// If tag data is present, it also has a varint-encoded length
// followed by the tag itself.
//
// If the import path follows, then 4 bytes at the end of
// the data form a nameOff. The import path is only set for concrete
// methods that are defined in a different package than their type.
//
// If a name starts with "*", then the exported bit represents
// whether the pointed to type is exported.
// // Note: this encoding must match here and in: // cmd/compile/internal/reflectdata/reflect.go // runtime/type.go // internal/reflectlite/type.go // cmd/link/internal/ld/decodesym.go type name struct { bytes *byte } func ( name) ( int, string) *byte { return (*byte)(add(unsafe.Pointer(.bytes), uintptr(), )) } func ( name) () bool { return (*.bytes)&(1<<0) != 0 } func ( name) () bool { return (*.bytes)&(1<<1) != 0 } // readVarint parses a varint as encoded by encoding/binary. // It returns the number of encoded bytes and the encoded value. func ( name) ( int) (int, int) { := 0 for := 0; ; ++ { := *.data(+, "read varint") += int(&0x7f) << (7 * ) if &0x80 == 0 { return + 1, } } } // writeVarint writes n to buf in varint form. Returns the // number of bytes written. n must be nonnegative. // Writes at most 10 bytes. func ( []byte, int) int { for := 0; ; ++ { := byte( & 0x7f) >>= 7 if == 0 { [] = return + 1 } [] = | 0x80 } } func ( name) () ( string) { if .bytes == nil { return } , := .readVarint(1) := (*unsafeheader.String)(unsafe.Pointer(&)) .Data = unsafe.Pointer(.data(1+, "non-empty string")) .Len = return } func ( name) () ( string) { if !.hasTag() { return "" } , := .readVarint(1) , := .readVarint(1 + + ) := (*unsafeheader.String)(unsafe.Pointer(&)) .Data = unsafe.Pointer(.data(1+++, "non-empty string")) .Len = return } func ( name) () string { if .bytes == nil || *.data(0, "name flag field")&(1<<2) == 0 { return "" } , := .readVarint(1) := 1 + + if .hasTag() { , := .readVarint() += + } var int32 // Note that this field may not be aligned in memory, // so we cannot use a direct int32 assignment here. copy((*[4]byte)(unsafe.Pointer(&))[:], (*[4]byte)(unsafe.Pointer(.data(, "name offset field")))[:]) := name{(*byte)(resolveTypeOff(unsafe.Pointer(.bytes), ))} return .name() } func (, string, bool) name { if len() >= 1<<29 { panic("reflect.nameFrom: name too long: " + [:1024] + "...") } if len() >= 1<<29 { panic("reflect.nameFrom: tag too long: " + [:1024] + "...") } var [10]byte var [10]byte := writeVarint([:], len()) := writeVarint([:], len()) var byte := 1 + + len() if { |= 1 << 0 } if len() > 0 { += + len() |= 1 << 1 } := make([]byte, ) [0] = copy([1:], [:]) copy([1+:], ) if len() > 0 { := [1++len():] copy(, [:]) copy([:], ) } return name{bytes: &[0]} } /* * The compiler knows the exact layout of all the data structures above. * The compiler does not know about the data structures and methods below. */ // Method represents a single method. type Method struct { // Name is the method name. Name string // PkgPath is the package path that qualifies a lower case (unexported) // method name. It is empty for upper case (exported) method names. // The combination of PkgPath and Name uniquely identifies a method // in a method set. // See https://golang.org/ref/spec#Uniqueness_of_identifiers PkgPath string Type Type // method type Func Value // func with receiver as first argument Index int // index for Type.Method } // IsExported reports whether the method is exported. func ( Method) () bool { return .PkgPath == "" } const ( kindDirectIface = 1 << 5 kindGCProg = 1 << 6 // Type.gc points to GC program kindMask = (1 << 5) - 1 ) // String returns the name of k. 
func ( Kind) () string { if int() < len(kindNames) { return kindNames[] } return "kind" + strconv.Itoa(int()) } var kindNames = []string{ Invalid: "invalid", Bool: "bool", Int: "int", Int8: "int8", Int16: "int16", Int32: "int32", Int64: "int64", Uint: "uint", Uint8: "uint8", Uint16: "uint16", Uint32: "uint32", Uint64: "uint64", Uintptr: "uintptr", Float32: "float32", Float64: "float64", Complex64: "complex64", Complex128: "complex128", Array: "array", Chan: "chan", Func: "func", Interface: "interface", Map: "map", Pointer: "ptr", Slice: "slice", String: "string", Struct: "struct", UnsafePointer: "unsafe.Pointer", } func ( *uncommonType) () []method { if .mcount == 0 { return nil } return (*[1 << 16]method)(add(unsafe.Pointer(), uintptr(.moff), "t.mcount > 0"))[:.mcount:.mcount] } func ( *uncommonType) () []method { if .xcount == 0 { return nil } return (*[1 << 16]method)(add(unsafe.Pointer(), uintptr(.moff), "t.xcount > 0"))[:.xcount:.xcount] } // resolveNameOff resolves a name offset from a base pointer. // The (*rtype).nameOff method is a convenience wrapper for this function. // Implemented in the runtime package. func ( unsafe.Pointer, int32) unsafe.Pointer // resolveTypeOff resolves an *rtype offset from a base type. // The (*rtype).typeOff method is a convenience wrapper for this function. // Implemented in the runtime package. func ( unsafe.Pointer, int32) unsafe.Pointer // resolveTextOff resolves a function pointer offset from a base type. // The (*rtype).textOff method is a convenience wrapper for this function. // Implemented in the runtime package. func ( unsafe.Pointer, int32) unsafe.Pointer // addReflectOff adds a pointer to the reflection lookup map in the runtime. // It returns a new ID that can be used as a typeOff or textOff, and will // be resolved correctly. Implemented in the runtime package. func ( unsafe.Pointer) int32 // resolveReflectName adds a name to the reflection lookup map in the runtime. // It returns a new nameOff that can be used to refer to the pointer. func ( name) nameOff { return nameOff(addReflectOff(unsafe.Pointer(.bytes))) } // resolveReflectType adds a *rtype to the reflection lookup map in the runtime. // It returns a new typeOff that can be used to refer to the pointer. func ( *rtype) typeOff { return typeOff(addReflectOff(unsafe.Pointer())) } // resolveReflectText adds a function pointer to the reflection lookup map in // the runtime. It returns a new textOff that can be used to refer to the // pointer. func ( unsafe.Pointer) textOff { return textOff(addReflectOff()) } type nameOff int32 // offset to a name type typeOff int32 // offset to an *rtype type textOff int32 // offset from top of text section func ( *rtype) ( nameOff) name { return name{(*byte)(resolveNameOff(unsafe.Pointer(), int32()))} } func ( *rtype) ( typeOff) *rtype { return (*rtype)(resolveTypeOff(unsafe.Pointer(), int32())) } func ( *rtype) ( textOff) unsafe.Pointer { return resolveTextOff(unsafe.Pointer(), int32()) } func ( *rtype) () *uncommonType { if .tflag&tflagUncommon == 0 { return nil } switch .Kind() { case Struct: return &(*structTypeUncommon)(unsafe.Pointer()).u case Pointer: type struct { ptrType uncommonType } return &(*)(unsafe.Pointer()). case Func: type struct { funcType uncommonType } return &(*)(unsafe.Pointer()). case Slice: type struct { sliceType uncommonType } return &(*)(unsafe.Pointer()). case Array: type struct { arrayType uncommonType } return &(*)(unsafe.Pointer()). case Chan: type struct { chanType uncommonType } return &(*)(unsafe.Pointer()). 
case Map: type struct { mapType uncommonType } return &(*)(unsafe.Pointer()). case Interface: type struct { interfaceType uncommonType } return &(*)(unsafe.Pointer()). default: type struct { rtype uncommonType } return &(*)(unsafe.Pointer()). } } func ( *rtype) () string { := .nameOff(.str).name() if .tflag&tflagExtraStar != 0 { return [1:] } return } func ( *rtype) () uintptr { return .size } func ( *rtype) () int { if == nil { panic("reflect: Bits of nil Type") } := .Kind() if < Int || > Complex128 { panic("reflect: Bits of non-arithmetic Type " + .String()) } return int(.size) * 8 } func ( *rtype) () int { return int(.align) } func ( *rtype) () int { return int(.fieldAlign) } func ( *rtype) () Kind { return Kind(.kind & kindMask) } func ( *rtype) () bool { return .ptrdata != 0 } func ( *rtype) () *rtype { return } func ( *rtype) () []method { := .uncommon() if == nil { return nil } return .exportedMethods() } func ( *rtype) () int { if .Kind() == Interface { := (*interfaceType)(unsafe.Pointer()) return .NumMethod() } return len(.exportedMethods()) } func ( *rtype) ( int) ( Method) { if .Kind() == Interface { := (*interfaceType)(unsafe.Pointer()) return .Method() } := .exportedMethods() if < 0 || >= len() { panic("reflect: Method index out of range") } := [] := .nameOff(.name) .Name = .name() := flag(Func) := .typeOff(.mtyp) := (*funcType)(unsafe.Pointer()) := make([]Type, 0, 1+len(.in())) = append(, ) for , := range .in() { = append(, ) } := make([]Type, 0, len(.out())) for , := range .out() { = append(, ) } := FuncOf(, , .IsVariadic()) .Type = := .textOff(.tfn) := unsafe.Pointer(&) .Func = Value{.(*rtype), , } .Index = return } func ( *rtype) ( string) ( Method, bool) { if .Kind() == Interface { := (*interfaceType)(unsafe.Pointer()) return .MethodByName() } := .uncommon() if == nil { return Method{}, false } // TODO(mdempsky): Binary search. for , := range .exportedMethods() { if .nameOff(.name).name() == { return .Method(), true } } return Method{}, false } func ( *rtype) () string { if .tflag&tflagNamed == 0 { return "" } := .uncommon() if == nil { return "" } return .nameOff(.pkgPath).name() } func ( *rtype) () bool { return .tflag&tflagNamed != 0 } func ( *rtype) () string { if !.hasName() { return "" } := .String() := len() - 1 := 0 for >= 0 && ([] != '.' 
|| != 0) { switch [] { case ']': ++ case '[': -- } -- } return [+1:] } func ( *rtype) () ChanDir { if .Kind() != Chan { panic("reflect: ChanDir of non-chan type " + .String()) } := (*chanType)(unsafe.Pointer()) return ChanDir(.dir) } func ( *rtype) () bool { if .Kind() != Func { panic("reflect: IsVariadic of non-func type " + .String()) } := (*funcType)(unsafe.Pointer()) return .outCount&(1<<15) != 0 } func ( *rtype) () Type { switch .Kind() { case Array: := (*arrayType)(unsafe.Pointer()) return toType(.elem) case Chan: := (*chanType)(unsafe.Pointer()) return toType(.elem) case Map: := (*mapType)(unsafe.Pointer()) return toType(.elem) case Pointer: := (*ptrType)(unsafe.Pointer()) return toType(.elem) case Slice: := (*sliceType)(unsafe.Pointer()) return toType(.elem) } panic("reflect: Elem of invalid type " + .String()) } func ( *rtype) ( int) StructField { if .Kind() != Struct { panic("reflect: Field of non-struct type " + .String()) } := (*structType)(unsafe.Pointer()) return .Field() } func ( *rtype) ( []int) StructField { if .Kind() != Struct { panic("reflect: FieldByIndex of non-struct type " + .String()) } := (*structType)(unsafe.Pointer()) return .FieldByIndex() } func ( *rtype) ( string) (StructField, bool) { if .Kind() != Struct { panic("reflect: FieldByName of non-struct type " + .String()) } := (*structType)(unsafe.Pointer()) return .FieldByName() } func ( *rtype) ( func(string) bool) (StructField, bool) { if .Kind() != Struct { panic("reflect: FieldByNameFunc of non-struct type " + .String()) } := (*structType)(unsafe.Pointer()) return .FieldByNameFunc() } func ( *rtype) ( int) Type { if .Kind() != Func { panic("reflect: In of non-func type " + .String()) } := (*funcType)(unsafe.Pointer()) return toType(.in()[]) } func ( *rtype) () Type { if .Kind() != Map { panic("reflect: Key of non-map type " + .String()) } := (*mapType)(unsafe.Pointer()) return toType(.key) } func ( *rtype) () int { if .Kind() != Array { panic("reflect: Len of non-array type " + .String()) } := (*arrayType)(unsafe.Pointer()) return int(.len) } func ( *rtype) () int { if .Kind() != Struct { panic("reflect: NumField of non-struct type " + .String()) } := (*structType)(unsafe.Pointer()) return len(.fields) } func ( *rtype) () int { if .Kind() != Func { panic("reflect: NumIn of non-func type " + .String()) } := (*funcType)(unsafe.Pointer()) return int(.inCount) } func ( *rtype) () int { if .Kind() != Func { panic("reflect: NumOut of non-func type " + .String()) } := (*funcType)(unsafe.Pointer()) return len(.out()) } func ( *rtype) ( int) Type { if .Kind() != Func { panic("reflect: Out of non-func type " + .String()) } := (*funcType)(unsafe.Pointer()) return toType(.out()[]) } func ( *funcType) () []*rtype { := unsafe.Sizeof(*) if .tflag&tflagUncommon != 0 { += unsafe.Sizeof(uncommonType{}) } if .inCount == 0 { return nil } return (*[1 << 20]*rtype)(add(unsafe.Pointer(), , "t.inCount > 0"))[:.inCount:.inCount] } func ( *funcType) () []*rtype { := unsafe.Sizeof(*) if .tflag&tflagUncommon != 0 { += unsafe.Sizeof(uncommonType{}) } := .outCount & (1<<15 - 1) if == 0 { return nil } return (*[1 << 20]*rtype)(add(unsafe.Pointer(), , "outCount > 0"))[.inCount : .inCount+ : .inCount+] } // add returns p+x. 
// // The whySafe string is ignored, so that the function still inlines // as efficiently as p+x, but all call sites should use the string to // record why the addition is safe, which is to say why the addition // does not cause x to advance to the very end of p's allocation // and therefore point incorrectly at the next block in memory. func ( unsafe.Pointer, uintptr, string) unsafe.Pointer { return unsafe.Pointer(uintptr() + ) } func ( ChanDir) () string { switch { case SendDir: return "chan<-" case RecvDir: return "<-chan" case BothDir: return "chan" } return "ChanDir" + strconv.Itoa(int()) } // Method returns the i'th method in the type's method set. func ( *interfaceType) ( int) ( Method) { if < 0 || >= len(.methods) { return } := &.methods[] := .nameOff(.name) .Name = .name() if !.isExported() { .PkgPath = .pkgPath() if .PkgPath == "" { .PkgPath = .pkgPath.name() } } .Type = toType(.typeOff(.typ)) .Index = return } // NumMethod returns the number of interface methods in the type's method set. func ( *interfaceType) () int { return len(.methods) } // MethodByName method with the given name in the type's method set. func ( *interfaceType) ( string) ( Method, bool) { if == nil { return } var *imethod for := range .methods { = &.methods[] if .nameOff(.name).name() == { return .Method(), true } } return } // A StructField describes a single field in a struct. type StructField struct { // Name is the field name. Name string // PkgPath is the package path that qualifies a lower case (unexported) // field name. It is empty for upper case (exported) field names. // See https://golang.org/ref/spec#Uniqueness_of_identifiers PkgPath string Type Type // field type Tag StructTag // field tag string Offset uintptr // offset within struct, in bytes Index []int // index sequence for Type.FieldByIndex Anonymous bool // is an embedded field } // IsExported reports whether the field is exported. func ( StructField) () bool { return .PkgPath == "" } // A StructTag is the tag string in a struct field. // // By convention, tag strings are a concatenation of // optionally space-separated key:"value" pairs. // Each key is a non-empty string consisting of non-control // characters other than space (U+0020 ' '), quote (U+0022 '"'), // and colon (U+003A ':'). Each value is quoted using U+0022 '"' // characters and Go string literal syntax. type StructTag string // Get returns the value associated with key in the tag string. // If there is no such key in the tag, Get returns the empty string. // If the tag does not have the conventional format, the value // returned by Get is unspecified. To determine whether a tag is // explicitly set to the empty string, use Lookup. func ( StructTag) ( string) string { , := .Lookup() return } // Lookup returns the value associated with key in the tag string. // If the key is present in the tag the value (which may be empty) // is returned. Otherwise the returned value will be the empty string. // The ok return value reports whether the value was explicitly set in // the tag string. If the tag does not have the conventional format, // the value returned by Lookup is unspecified. func ( StructTag) ( string) ( string, bool) { // When modifying this code, also update the validateStructTag code // in cmd/vet/structtag.go. for != "" { // Skip leading space. := 0 for < len() && [] == ' ' { ++ } = [:] if == "" { break } // Scan to colon. A space, a quote or a control character is a syntax error. 
// Strictly speaking, control chars include the range [0x7f, 0x9f], not just // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters // as it is simpler to inspect the tag's bytes than the tag's runes. = 0 for < len() && [] > ' ' && [] != ':' && [] != '"' && [] != 0x7f { ++ } if == 0 || +1 >= len() || [] != ':' || [+1] != '"' { break } := string([:]) = [+1:] // Scan quoted string to find value. = 1 for < len() && [] != '"' { if [] == '\\' { ++ } ++ } if >= len() { break } := string([:+1]) = [+1:] if == { , := strconv.Unquote() if != nil { break } return , true } } return "", false } // Field returns the i'th struct field. func ( *structType) ( int) ( StructField) { if < 0 || >= len(.fields) { panic("reflect: Field index out of bounds") } := &.fields[] .Type = toType(.typ) .Name = .name.name() .Anonymous = .embedded() if !.name.isExported() { .PkgPath = .pkgPath.name() } if := .name.tag(); != "" { .Tag = StructTag() } .Offset = .offset() // NOTE(rsc): This is the only allocation in the interface // presented by a reflect.Type. It would be nice to avoid, // at least in the common cases, but we need to make sure // that misbehaving clients of reflect cannot affect other // uses of reflect. One possibility is CL 5371098, but we // postponed that ugliness until there is a demonstrated // need for the performance. This is issue 2320. .Index = []int{} return } // TODO(gri): Should there be an error/bool indicator if the index // is wrong for FieldByIndex? // FieldByIndex returns the nested field corresponding to index. func ( *structType) ( []int) ( StructField) { .Type = toType(&.rtype) for , := range { if > 0 { := .Type if .Kind() == Pointer && .Elem().Kind() == Struct { = .Elem() } .Type = } = .Type.Field() } return } // A fieldScan represents an item on the fieldByNameFunc scan work list. type fieldScan struct { typ *structType index []int } // FieldByNameFunc returns the struct field with a name that satisfies the // match function and a boolean to indicate if the field was found. func ( *structType) ( func(string) bool) ( StructField, bool) { // This uses the same condition that the Go language does: there must be a unique instance // of the match at a given depth level. If there are multiple instances of a match at the // same depth, they annihilate each other and inhibit any possible match at a lower level. // The algorithm is breadth first search, one depth level at a time. // The current and next slices are work queues: // current lists the fields to visit on this depth level, // and next lists the fields on the next lower level. := []fieldScan{} := []fieldScan{{typ: }} // nextCount records the number of times an embedded type has been // encountered and considered for queueing in the 'next' slice. // We only queue the first one, but we increment the count on each. // If a struct type T can be reached more than once at a given depth level, // then it annihilates itself and need not be considered at all when we // process that next depth level. var map[*structType]int // visited records the structs that have been considered already. // Embedded pointer fields can create cycles in the graph of // reachable embedded types; visited avoids following those cycles. // It also avoids duplicated effort: if we didn't find the field in an // embedded type T at level 2, we won't find it in one at level 4 either. := map[*structType]bool{} for len() > 0 { , = , [:0] := = nil // Process all the fields at this depth, now listed in 'current'. 
// The loop queues embedded fields found in 'next', for processing during the next // iteration. The multiplicity of the 'current' field counts is recorded // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'. for , := range { := .typ if [] { // We've looked through this type before, at a higher level. // That higher level would shadow the lower level we're now at, // so this one can't be useful to us. Ignore it. continue } [] = true for := range .fields { := &.fields[] // Find name and (for embedded field) type for field f. := .name.name() var *rtype if .embedded() { // Embedded field of type T or *T. = .typ if .Kind() == Pointer { = .Elem().common() } } // Does it match? if () { // Potential match if [] > 1 || { // Name appeared multiple times at this level: annihilate. return StructField{}, false } = .Field() .Index = nil .Index = append(.Index, .index...) .Index = append(.Index, ) = true continue } // Queue embedded struct fields for processing with next level, // but only if we haven't seen a match yet at this level and only // if the embedded types haven't already been queued. if || == nil || .Kind() != Struct { continue } := (*structType)(unsafe.Pointer()) if [] > 0 { [] = 2 // exact multiple doesn't matter continue } if == nil { = map[*structType]int{} } [] = 1 if [] > 1 { [] = 2 // exact multiple doesn't matter } var []int = append(, .index...) = append(, ) = append(, fieldScan{, }) } } if { break } } return } // FieldByName returns the struct field with the given name // and a boolean to indicate if the field was found. func ( *structType) ( string) ( StructField, bool) { // Quick check for top-level name, or struct without embedded fields. := false if != "" { for := range .fields { := &.fields[] if .name.name() == { return .Field(), true } if .embedded() { = true } } } if ! { return } return .FieldByNameFunc(func( string) bool { return == }) } // TypeOf returns the reflection Type that represents the dynamic type of i. // If i is a nil interface value, TypeOf returns nil. func ( any) Type { := *(*emptyInterface)(unsafe.Pointer(&)) return toType(.typ) } // ptrMap is the cache for PointerTo. var ptrMap sync.Map // map[*rtype]*ptrType // PtrTo returns the pointer type with element t. // For example, if t represents type Foo, PtrTo(t) represents *Foo. // // PtrTo is the old spelling of PointerTo. // The two functions behave identically. func ( Type) Type { return PointerTo() } // PointerTo returns the pointer type with element t. // For example, if t represents type Foo, PointerTo(t) represents *Foo. func ( Type) Type { return .(*rtype).ptrTo() } func ( *rtype) () *rtype { if .ptrToThis != 0 { return .typeOff(.ptrToThis) } // Check the cache. if , := ptrMap.Load(); { return &.(*ptrType).rtype } // Look in known types. := "*" + .String() for , := range typesByString() { := (*ptrType)(unsafe.Pointer()) if .elem != { continue } , := ptrMap.LoadOrStore(, ) return &.(*ptrType).rtype } // Create a new ptrType starting with the description // of an *unsafe.Pointer. var any = (*unsafe.Pointer)(nil) := *(**ptrType)(unsafe.Pointer(&)) := * .str = resolveReflectName(newName(, "", false)) .ptrToThis = 0 // For the type structures linked into the binary, the // compiler provides a good hash of the string. // Create a good hash for the new string by using // the FNV-1 hash's mixing function to combine the // old hash and the new "*". 
.hash = fnv1(.hash, '*') .elem = , := ptrMap.LoadOrStore(, &) return &.(*ptrType).rtype } // fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function. func ( uint32, ...byte) uint32 { for , := range { = *16777619 ^ uint32() } return } func ( *rtype) ( Type) bool { if == nil { panic("reflect: nil type passed to Type.Implements") } if .Kind() != Interface { panic("reflect: non-interface type passed to Type.Implements") } return implements(.(*rtype), ) } func ( *rtype) ( Type) bool { if == nil { panic("reflect: nil type passed to Type.AssignableTo") } := .(*rtype) return directlyAssignable(, ) || implements(, ) } func ( *rtype) ( Type) bool { if == nil { panic("reflect: nil type passed to Type.ConvertibleTo") } := .(*rtype) return convertOp(, ) != nil } func ( *rtype) () bool { return .equal != nil } // implements reports whether the type V implements the interface type T. func (, *rtype) bool { if .Kind() != Interface { return false } := (*interfaceType)(unsafe.Pointer()) if len(.methods) == 0 { return true } // The same algorithm applies in both cases, but the // method tables for an interface type and a concrete type // are different, so the code is duplicated. // In both cases the algorithm is a linear scan over the two // lists - T's methods and V's methods - simultaneously. // Since method tables are stored in a unique sorted order // (alphabetical, with no duplicate method names), the scan // through V's methods must hit a match for each of T's // methods along the way, or else V does not implement T. // This lets us run the scan in overall linear time instead of // the quadratic time a naive search would require. // See also ../runtime/iface.go. if .Kind() == Interface { := (*interfaceType)(unsafe.Pointer()) := 0 for := 0; < len(.methods); ++ { := &.methods[] := .nameOff(.name) := &.methods[] := .nameOff(.name) if .name() == .name() && .typeOff(.typ) == .typeOff(.typ) { if !.isExported() { := .pkgPath() if == "" { = .pkgPath.name() } := .pkgPath() if == "" { = .pkgPath.name() } if != { continue } } if ++; >= len(.methods) { return true } } } return false } := .uncommon() if == nil { return false } := 0 := .methods() for := 0; < int(.mcount); ++ { := &.methods[] := .nameOff(.name) := [] := .nameOff(.name) if .name() == .name() && .typeOff(.mtyp) == .typeOff(.typ) { if !.isExported() { := .pkgPath() if == "" { = .pkgPath.name() } := .pkgPath() if == "" { = .nameOff(.pkgPath).name() } if != { continue } } if ++; >= len(.methods) { return true } } } return false } // specialChannelAssignability reports whether a value x of channel type V // can be directly assigned (using memmove) to another channel type T. // https://golang.org/doc/go_spec.html#Assignability // T and V must be both of Chan kind. func (, *rtype) bool { // Special case: // x is a bidirectional channel value, T is a channel type, // x's type V and T have identical element types, // and at least one of V or T is not a defined type. return .ChanDir() == BothDir && (.Name() == "" || .Name() == "") && haveIdenticalType(.Elem(), .Elem(), true) } // directlyAssignable reports whether a value x of type V can be directly // assigned (using memmove) to a value of type T. // https://golang.org/doc/go_spec.html#Assignability // Ignoring the interface rules (implemented elsewhere) // and the ideal constant rules (no ideal constants at run time). func (, *rtype) bool { // x's type V is identical to T? 
if == { return true } // Otherwise at least one of T and V must not be defined // and they must have the same kind. if .hasName() && .hasName() || .Kind() != .Kind() { return false } if .Kind() == Chan && specialChannelAssignability(, ) { return true } // x's type T and V must have identical underlying types. return haveIdenticalUnderlyingType(, , true) } func (, Type, bool) bool { if { return == } if .Name() != .Name() || .Kind() != .Kind() || .PkgPath() != .PkgPath() { return false } return haveIdenticalUnderlyingType(.common(), .common(), false) } func (, *rtype, bool) bool { if == { return true } := .Kind() if != .Kind() { return false } // Non-composite types of equal kind have same underlying type // (the predefined instance of the type). if Bool <= && <= Complex128 || == String || == UnsafePointer { return true } // Composite types. switch { case Array: return .Len() == .Len() && haveIdenticalType(.Elem(), .Elem(), ) case Chan: return .ChanDir() == .ChanDir() && haveIdenticalType(.Elem(), .Elem(), ) case Func: := (*funcType)(unsafe.Pointer()) := (*funcType)(unsafe.Pointer()) if .outCount != .outCount || .inCount != .inCount { return false } for := 0; < .NumIn(); ++ { if !haveIdenticalType(.In(), .In(), ) { return false } } for := 0; < .NumOut(); ++ { if !haveIdenticalType(.Out(), .Out(), ) { return false } } return true case Interface: := (*interfaceType)(unsafe.Pointer()) := (*interfaceType)(unsafe.Pointer()) if len(.methods) == 0 && len(.methods) == 0 { return true } // Might have the same methods but still // need a run time conversion. return false case Map: return haveIdenticalType(.Key(), .Key(), ) && haveIdenticalType(.Elem(), .Elem(), ) case Pointer, Slice: return haveIdenticalType(.Elem(), .Elem(), ) case Struct: := (*structType)(unsafe.Pointer()) := (*structType)(unsafe.Pointer()) if len(.fields) != len(.fields) { return false } if .pkgPath.name() != .pkgPath.name() { return false } for := range .fields { := &.fields[] := &.fields[] if .name.name() != .name.name() { return false } if !haveIdenticalType(.typ, .typ, ) { return false } if && .name.tag() != .name.tag() { return false } if .offsetEmbed != .offsetEmbed { return false } } return true } return false } // typelinks is implemented in package runtime. // It returns a slice of the sections in each module, // and a slice of *rtype offsets in each module. // // The types in each module are sorted by string. That is, the first // two linked types of the first module are: // // d0 := sections[0] // t1 := (*rtype)(add(d0, offset[0][0])) // t2 := (*rtype)(add(d0, offset[0][1])) // // and // // t1.String() < t2.String() // // Note that strings are not unique identifiers for types: // there can be more than one with a given string. // Only types we might want to look up are included: // pointers, channels, maps, slices, and arrays. func () ( []unsafe.Pointer, [][]int32) func ( unsafe.Pointer, int32) *rtype { return (*rtype)(add(, uintptr(), "sizeof(rtype) > 0")) } // typesByString returns the subslice of typelinks() whose elements have // the given string representation. // It may be empty (no known types with that string) or may have // multiple elements (multiple types with that string). func ( string) []*rtype { , := typelinks() var []*rtype for , := range { := [] // We are looking for the first index i where the string becomes >= s. // This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s). 
, := 0, len() for < { := + (-)>>1 // avoid overflow when computing h // i ≤ h < j if !(rtypeOff(, []).String() >= ) { = + 1 // preserves f(i-1) == false } else { = // preserves f(j) == true } } // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. // Having found the first, linear scan forward to find the last. // We could do a second binary search, but the caller is going // to do a linear scan anyway. for := ; < len(); ++ { := rtypeOff(, []) if .String() != { break } = append(, ) } } return } // The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups. var lookupCache sync.Map // map[cacheKey]*rtype // A cacheKey is the key for use in the lookupCache. // Four values describe any of the types we are looking for: // type kind, one or two subtypes, and an extra integer. type cacheKey struct { kind Kind t1 *rtype t2 *rtype extra uintptr } // The funcLookupCache caches FuncOf lookups. // FuncOf does not share the common lookupCache since cacheKey is not // sufficient to represent functions unambiguously. var funcLookupCache struct { sync.Mutex // Guards stores (but not loads) on m. // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf. // Elements of m are append-only and thus safe for concurrent reading. m sync.Map } // ChanOf returns the channel type with the given direction and element type. // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int. // // The gc runtime imposes a limit of 64 kB on channel element types. // If t's size is equal to or exceeds this limit, ChanOf panics. func ( ChanDir, Type) Type { := .(*rtype) // Look in cache. := cacheKey{Chan, , nil, uintptr()} if , := lookupCache.Load(); { return .(*rtype) } // This restriction is imposed by the gc compiler and the runtime. if .size >= 1<<16 { panic("reflect.ChanOf: element size too large") } // Look in known types. var string switch { default: panic("reflect.ChanOf: invalid dir") case SendDir: = "chan<- " + .String() case RecvDir: = "<-chan " + .String() case BothDir: := .String() if [0] == '<' { // typ is recv chan, need parentheses as "<-" associates with leftmost // chan possible, see: // * https://golang.org/ref/spec#Channel_types // * https://github.com/golang/go/issues/39897 = "chan (" + + ")" } else { = "chan " + } } for , := range typesByString() { := (*chanType)(unsafe.Pointer()) if .elem == && .dir == uintptr() { , := lookupCache.LoadOrStore(, ) return .(Type) } } // Make a channel type. var any = (chan unsafe.Pointer)(nil) := *(**chanType)(unsafe.Pointer(&)) := * .tflag = tflagRegularMemory .dir = uintptr() .str = resolveReflectName(newName(, "", false)) .hash = fnv1(.hash, 'c', byte()) .elem = , := lookupCache.LoadOrStore(, &.rtype) return .(Type) } // MapOf returns the map type with the given key and element types. // For example, if k represents int and e represents string, // MapOf(k, e) represents map[int]string. // // If the key type is not a valid map key type (that is, if it does // not implement Go's == operator), MapOf panics. func (, Type) Type { := .(*rtype) := .(*rtype) if .equal == nil { panic("reflect.MapOf: invalid key type " + .String()) } // Look in cache. := cacheKey{Map, , , 0} if , := lookupCache.Load(); { return .(Type) } // Look in known types. := "map[" + .String() + "]" + .String() for , := range typesByString() { := (*mapType)(unsafe.Pointer()) if .key == && .elem == { , := lookupCache.LoadOrStore(, ) return .(Type) } } // Make a map type. 
// Note: flag values must match those used in the TMAP case // in ../cmd/compile/internal/reflectdata/reflect.go:writeType. var any = (map[unsafe.Pointer]unsafe.Pointer)(nil) := **(**mapType)(unsafe.Pointer(&)) .str = resolveReflectName(newName(, "", false)) .tflag = 0 .hash = fnv1(.hash, 'm', byte(.hash>>24), byte(.hash>>16), byte(.hash>>8), byte(.hash)) .key = .elem = .bucket = bucketOf(, ) .hasher = func( unsafe.Pointer, uintptr) uintptr { return typehash(, , ) } .flags = 0 if .size > maxKeySize { .keysize = uint8(goarch.PtrSize) .flags |= 1 // indirect key } else { .keysize = uint8(.size) } if .size > maxValSize { .valuesize = uint8(goarch.PtrSize) .flags |= 2 // indirect value } else { .valuesize = uint8(.size) } .bucketsize = uint16(.bucket.size) if isReflexive() { .flags |= 4 } if needKeyUpdate() { .flags |= 8 } if hashMightPanic() { .flags |= 16 } .ptrToThis = 0 , := lookupCache.LoadOrStore(, &.rtype) return .(Type) } // TODO(crawshaw): as these funcTypeFixedN structs have no methods, // they could be defined at runtime using the StructOf function. type funcTypeFixed4 struct { funcType args [4]*rtype } type funcTypeFixed8 struct { funcType args [8]*rtype } type funcTypeFixed16 struct { funcType args [16]*rtype } type funcTypeFixed32 struct { funcType args [32]*rtype } type funcTypeFixed64 struct { funcType args [64]*rtype } type funcTypeFixed128 struct { funcType args [128]*rtype } // FuncOf returns the function type with the given argument and result types. // For example if k represents int and e represents string, // FuncOf([]Type{k}, []Type{e}, false) represents func(int) string. // // The variadic argument controls whether the function is variadic. FuncOf // panics if the in[len(in)-1] does not represent a slice and variadic is // true. func (, []Type, bool) Type { if && (len() == 0 || [len()-1].Kind() != Slice) { panic("reflect.FuncOf: last arg of variadic func must be slice") } // Make a func type. var any = (func())(nil) := *(**funcType)(unsafe.Pointer(&)) := len() + len() var *funcType var []*rtype switch { case <= 4: := new(funcTypeFixed4) = .args[:0:len(.args)] = &.funcType case <= 8: := new(funcTypeFixed8) = .args[:0:len(.args)] = &.funcType case <= 16: := new(funcTypeFixed16) = .args[:0:len(.args)] = &.funcType case <= 32: := new(funcTypeFixed32) = .args[:0:len(.args)] = &.funcType case <= 64: := new(funcTypeFixed64) = .args[:0:len(.args)] = &.funcType case <= 128: := new(funcTypeFixed128) = .args[:0:len(.args)] = &.funcType default: panic("reflect.FuncOf: too many arguments") } * = * // Build a hash and minimally populate ft. var uint32 for , := range { := .(*rtype) = append(, ) = fnv1(, byte(.hash>>24), byte(.hash>>16), byte(.hash>>8), byte(.hash)) } if { = fnv1(, 'v') } = fnv1(, '.') for , := range { := .(*rtype) = append(, ) = fnv1(, byte(.hash>>24), byte(.hash>>16), byte(.hash>>8), byte(.hash)) } if len() > 50 { panic("reflect.FuncOf does not support more than 50 arguments") } .tflag = 0 .hash = .inCount = uint16(len()) .outCount = uint16(len()) if { .outCount |= 1 << 15 } // Look in cache. if , := funcLookupCache.m.Load(); { for , := range .([]*rtype) { if haveIdenticalUnderlyingType(&.rtype, , true) { return } } } // Not in cache, lock and retry. 
funcLookupCache.Lock() defer funcLookupCache.Unlock() if , := funcLookupCache.m.Load(); { for , := range .([]*rtype) { if haveIdenticalUnderlyingType(&.rtype, , true) { return } } } := func( *rtype) Type { var []*rtype if , := funcLookupCache.m.Load(); { = .([]*rtype) } funcLookupCache.m.Store(, append(, )) return } // Look in known types for the same string representation. := funcStr() for , := range typesByString() { if haveIdenticalUnderlyingType(&.rtype, , true) { return () } } // Populate the remaining fields of ft and store in cache. .str = resolveReflectName(newName(, "", false)) .ptrToThis = 0 return (&.rtype) } // funcStr builds a string representation of a funcType. func ( *funcType) string { := make([]byte, 0, 64) = append(, "func("...) for , := range .in() { if > 0 { = append(, ", "...) } if .IsVariadic() && == int(.inCount)-1 { = append(, "..."...) = append(, (*sliceType)(unsafe.Pointer()).elem.String()...) } else { = append(, .String()...) } } = append(, ')') := .out() if len() == 1 { = append(, ' ') } else if len() > 1 { = append(, " ("...) } for , := range { if > 0 { = append(, ", "...) } = append(, .String()...) } if len() > 1 { = append(, ')') } return string() } // isReflexive reports whether the == operation on the type is reflexive. // That is, x == x for all values x of type t. func ( *rtype) bool { switch .Kind() { case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, String, UnsafePointer: return true case Float32, Float64, Complex64, Complex128, Interface: return false case Array: := (*arrayType)(unsafe.Pointer()) return (.elem) case Struct: := (*structType)(unsafe.Pointer()) for , := range .fields { if !(.typ) { return false } } return true default: // Func, Map, Slice, Invalid panic("isReflexive called on non-key type " + .String()) } } // needKeyUpdate reports whether map overwrites require the key to be copied. func ( *rtype) bool { switch .Kind() { case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, UnsafePointer: return false case Float32, Float64, Complex64, Complex128, Interface, String: // Float keys can be updated from +0 to -0. // String keys can be updated to use a smaller backing store. // Interfaces might have floats of strings in them. return true case Array: := (*arrayType)(unsafe.Pointer()) return (.elem) case Struct: := (*structType)(unsafe.Pointer()) for , := range .fields { if (.typ) { return true } } return false default: // Func, Map, Slice, Invalid panic("needKeyUpdate called on non-key type " + .String()) } } // hashMightPanic reports whether the hash of a map key of type t might panic. func ( *rtype) bool { switch .Kind() { case Interface: return true case Array: := (*arrayType)(unsafe.Pointer()) return (.elem) case Struct: := (*structType)(unsafe.Pointer()) for , := range .fields { if (.typ) { return true } } return false default: return false } } // Make sure these routines stay in sync with ../../runtime/map.go! // These types exist only for GC, so we only fill out GC relevant info. // Currently, that's just size and the GC program. We also fill in string // for possible debugging use. const ( bucketSize uintptr = 8 maxKeySize uintptr = 128 maxValSize uintptr = 128 ) func (, *rtype) *rtype { if .size > maxKeySize { = PointerTo().(*rtype) } if .size > maxValSize { = PointerTo().(*rtype) } // Prepare GC data if any. 
// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes, // or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap. // Note that since the key and value are known to be <= 128 bytes, // they're guaranteed to have bitmaps instead of GC programs. var *byte var uintptr var uintptr := bucketSize*(1+.size+.size) + + goarch.PtrSize if &uintptr(.align-1) != 0 || &uintptr(.align-1) != 0 { panic("reflect: bad size computation in MapOf") } if .ptrdata != 0 || .ptrdata != 0 { := (bucketSize*(1+.size+.size) + goarch.PtrSize) / goarch.PtrSize := make([]byte, (+7)/8) := bucketSize / goarch.PtrSize if .ptrdata != 0 { emitGCMask(, , , bucketSize) } += bucketSize * .size / goarch.PtrSize if .ptrdata != 0 { emitGCMask(, , , bucketSize) } += bucketSize * .size / goarch.PtrSize += / goarch.PtrSize := [/8] |= 1 << ( % 8) = &[0] = ( + 1) * goarch.PtrSize // overflow word must be last if != { panic("reflect: bad layout computation in MapOf") } } := &rtype{ align: goarch.PtrSize, size: , kind: uint8(Struct), ptrdata: , gcdata: , } if > 0 { .align = 8 } := "bucket(" + .String() + "," + .String() + ")" .str = resolveReflectName(newName(, "", false)) return } func ( *rtype) (, uintptr) []byte { return (*[1 << 30]byte)(unsafe.Pointer(.gcdata))[::] } // emitGCMask writes the GC mask for [n]typ into out, starting at bit // offset base. func ( []byte, uintptr, *rtype, uintptr) { if .kind&kindGCProg != 0 { panic("reflect: unexpected GC program") } := .ptrdata / goarch.PtrSize := .size / goarch.PtrSize := .gcSlice(0, (+7)/8) for := uintptr(0); < ; ++ { if ([/8]>>(%8))&1 != 0 { for := uintptr(0); < ; ++ { := + * + [/8] |= 1 << ( % 8) } } } } // appendGCProg appends the GC program for the first ptrdata bytes of // typ to dst and returns the extended slice. func ( []byte, *rtype) []byte { if .kind&kindGCProg != 0 { // Element has GC program; emit one element. := uintptr(*(*uint32)(unsafe.Pointer(.gcdata))) := .gcSlice(4, 4+-1) return append(, ...) } // Element is small with pointer mask; use as literal bits. := .ptrdata / goarch.PtrSize := .gcSlice(0, (+7)/8) // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes). for ; > 120; -= 120 { = append(, 120) = append(, [:15]...) = [15:] } = append(, byte()) = append(, ...) return } // SliceOf returns the slice type with element type t. // For example, if t represents int, SliceOf(t) represents []int. func ( Type) Type { := .(*rtype) // Look in cache. := cacheKey{Slice, , nil, 0} if , := lookupCache.Load(); { return .(Type) } // Look in known types. := "[]" + .String() for , := range typesByString() { := (*sliceType)(unsafe.Pointer()) if .elem == { , := lookupCache.LoadOrStore(, ) return .(Type) } } // Make a slice type. var any = ([]unsafe.Pointer)(nil) := *(**sliceType)(unsafe.Pointer(&)) := * .tflag = 0 .str = resolveReflectName(newName(, "", false)) .hash = fnv1(.hash, '[') .elem = .ptrToThis = 0 , := lookupCache.LoadOrStore(, &.rtype) return .(Type) } // The structLookupCache caches StructOf lookups. // StructOf does not share the common lookupCache since we need to pin // the memory associated with *structTypeFixedN. var structLookupCache struct { sync.Mutex // Guards stores (but not loads) on m. // m is a map[uint32][]Type keyed by the hash calculated in StructOf. // Elements in m are append-only and thus safe for concurrent reading. m sync.Map } type structTypeUncommon struct { structType u uncommonType } // isLetter reports whether a given 'rune' is classified as a Letter. 
// isLetter reports whether a given 'rune' is classified as a Letter.
func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
}

// isValidFieldName checks if a string is a valid (struct) field name or not.
//
// According to the language spec, a field name should be an identifier.
//
// identifier = letter { letter | unicode_digit } .
// letter = unicode_letter | "_" .
func isValidFieldName(fieldName string) bool {
	for i, c := range fieldName {
		if i == 0 && !isLetter(c) {
			return false
		}

		if !(isLetter(c) || unicode.IsDigit(c)) {
			return false
		}
	}

	return len(fieldName) > 0
}

// StructOf returns the struct type containing fields.
// The Offset and Index fields are ignored and computed as they would be
// by the compiler.
//
// StructOf currently does not generate wrapper methods for embedded
// fields and panics if passed unexported StructFields.
// These limitations may be lifted in a future version.
func StructOf(fields []StructField) Type {
	var (
		hash       = fnv1(0, []byte("struct {")...)
		size       uintptr
		typalign   uint8
		comparable = true
		methods    []method

		fs   = make([]structField, len(fields))
		repr = make([]byte, 0, 64)
		fset = map[string]struct{}{} // fields' names

		hasGCProg = false // records whether a struct-field type has a GCProg
	)

	lastzero := uintptr(0)
	repr = append(repr, "struct {"...)
	pkgpath := ""
	for i, field := range fields {
		if field.Name == "" {
			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
		}
		if !isValidFieldName(field.Name) {
			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
		}
		if field.Type == nil {
			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
		}
		f, fpkgpath := runtimeStructField(field)
		ft := f.typ
		if ft.kind&kindGCProg != 0 {
			hasGCProg = true
		}
		if fpkgpath != "" {
			if pkgpath == "" {
				pkgpath = fpkgpath
			} else if pkgpath != fpkgpath {
				panic("reflect.Struct: fields with different PkgPath " + pkgpath + " and " + fpkgpath)
			}
		}

		// Update string and hash
		name := f.name.name()
		hash = fnv1(hash, []byte(name)...)
		repr = append(repr, (" " + name)...)
		if f.embedded() {
			// Embedded field
			if f.typ.Kind() == Pointer {
				// Embedded ** and *interface{} are illegal
				elem := ft.Elem()
				if k := elem.Kind(); k == Pointer || k == Interface {
					panic("reflect.StructOf: illegal embedded field type " + ft.String())
				}
			}

			switch f.typ.Kind() {
			case Interface:
				ift := (*interfaceType)(unsafe.Pointer(ft))
				for im, m := range ift.methods {
					if ift.nameOff(m.name).pkgPath() != "" {
						// TODO(sbinet). Issue 15924.
						panic("reflect: embedded interface with unexported method(s) not implemented")
					}

					var (
						mtyp    = ift.typeOff(m.typ)
						ifield  = i
						imethod = im
						ifn     Value
						tfn     Value
					)

					if ft.kind&kindDirectIface != 0 {
						tfn = MakeFunc(mtyp, func(in []Value) []Value {
							var args []Value
							var recv = in[0]
							if len(in) > 1 {
								args = in[1:]
							}
							return recv.Field(ifield).Method(imethod).Call(args)
						})
						ifn = MakeFunc(mtyp, func(in []Value) []Value {
							var args []Value
							var recv = in[0]
							if len(in) > 1 {
								args = in[1:]
							}
							return recv.Field(ifield).Method(imethod).Call(args)
						})
					} else {
						tfn = MakeFunc(mtyp, func(in []Value) []Value {
							var args []Value
							var recv = in[0]
							if len(in) > 1 {
								args = in[1:]
							}
							return recv.Field(ifield).Method(imethod).Call(args)
						})
						ifn = MakeFunc(mtyp, func(in []Value) []Value {
							var args []Value
							var recv = Indirect(in[0])
							if len(in) > 1 {
								args = in[1:]
							}
							return recv.Field(ifield).Method(imethod).Call(args)
						})
					}

					methods = append(methods, method{
						name: resolveReflectName(ift.nameOff(m.name)),
						mtyp: resolveReflectType(mtyp),
						ifn:  resolveReflectText(unsafe.Pointer(&ifn)),
						tfn:  resolveReflectText(unsafe.Pointer(&tfn)),
					})
				}
			case Pointer:
				ptr := (*ptrType)(unsafe.Pointer(ft))
				if unt := ptr.uncommon(); unt != nil {
					if i > 0 && unt.mcount > 0 {
						// Issue 15924.
						panic("reflect: embedded type with methods not implemented if type is not first field")
					}
					if len(fields) > 1 {
						panic("reflect: embedded type with methods not implemented if there is more than one field")
					}
					for _, m := range unt.methods() {
						mname := ptr.nameOff(m.name)
						if mname.pkgPath() != "" {
							// TODO(sbinet).
							// Issue 15924.
							panic("reflect: embedded interface with unexported method(s) not implemented")
						}
						methods = append(methods, method{
							name: resolveReflectName(mname),
							mtyp: resolveReflectType(ptr.typeOff(m.mtyp)),
							ifn:  resolveReflectText(ptr.textOff(m.ifn)),
							tfn:  resolveReflectText(ptr.textOff(m.tfn)),
						})
					}
				}
				if unt := ptr.elem.uncommon(); unt != nil {
					for _, m := range unt.methods() {
						mname := ptr.elem.nameOff(m.name)
						if mname.pkgPath() != "" {
							// TODO(sbinet)
							// Issue 15924.
							panic("reflect: embedded interface with unexported method(s) not implemented")
						}
						methods = append(methods, method{
							name: resolveReflectName(mname),
							mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)),
							ifn:  resolveReflectText(ptr.elem.textOff(m.ifn)),
							tfn:  resolveReflectText(ptr.elem.textOff(m.tfn)),
						})
					}
				}
			default:
				if unt := ft.uncommon(); unt != nil {
					if i > 0 && unt.mcount > 0 {
						// Issue 15924.
						panic("reflect: embedded type with methods not implemented if type is not first field")
					}
					if len(fields) > 1 && ft.kind&kindDirectIface != 0 {
						panic("reflect: embedded type with methods not implemented for non-pointer type")
					}
					for _, m := range unt.methods() {
						mname := ft.nameOff(m.name)
						if mname.pkgPath() != "" {
							// TODO(sbinet)
							// Issue 15924.
							panic("reflect: embedded interface with unexported method(s) not implemented")
						}
						methods = append(methods, method{
							name: resolveReflectName(mname),
							mtyp: resolveReflectType(ft.typeOff(m.mtyp)),
							ifn:  resolveReflectText(ft.textOff(m.ifn)),
							tfn:  resolveReflectText(ft.textOff(m.tfn)),
						})
					}
				}
			}
		}

		if _, dup := fset[name]; dup && name != "_" {
			panic("reflect.StructOf: duplicate field " + name)
		}
		fset[name] = struct{}{}

		hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash))

		repr = append(repr, (" " + ft.String())...)
		if f.name.hasTag() {
			hash = fnv1(hash, []byte(f.name.tag())...)
			repr = append(repr, (" " + strconv.Quote(f.name.tag()))...)
		}
		if i < len(fields)-1 {
			repr = append(repr, ';')
		}

		comparable = comparable && (ft.equal != nil)

		offset := align(size, uintptr(ft.align))
		if ft.align > typalign {
			typalign = ft.align
		}
		size = offset + ft.size
		f.offsetEmbed |= offset << 1

		if ft.size == 0 {
			lastzero = size
		}

		fs[i] = f
	}

	if size > 0 && lastzero == size {
		// This is a non-zero sized struct that ends in a
		// zero-sized field. We add an extra byte of padding,
		// to ensure that taking the address of the final
		// zero-sized field can't manufacture a pointer to the
		// next object in the heap. See issue 9401.
		size++
	}

	var typ *structType
	var ut *uncommonType

	if len(methods) == 0 {
		t := new(structTypeUncommon)
		typ = &t.structType
		ut = &t.u
	} else {
		// A *rtype representing a struct is followed directly in memory by an
		// array of method objects representing the methods attached to the
		// struct. To get the same layout for a run time generated type, we
		// need an array directly following the uncommonType memory.
		// A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN.
		tt := New(StructOf([]StructField{
			{Name: "S", Type: TypeOf(structType{})},
			{Name: "U", Type: TypeOf(uncommonType{})},
			{Name: "M", Type: ArrayOf(len(methods), TypeOf(methods[0]))},
		}))

		typ = (*structType)(tt.Elem().Field(0).Addr().UnsafePointer())
		ut = (*uncommonType)(tt.Elem().Field(1).Addr().UnsafePointer())

		copy(tt.Elem().Field(2).Slice(0, len(methods)).Interface().([]method), methods)
	}
	// TODO(sbinet): Once we allow embedding multiple types,
	// methods will need to be sorted like the compiler does.
	// TODO(sbinet): Once we allow non-exported methods, we will
	// need to compute xcount as the number of exported methods.
	ut.mcount = uint16(len(methods))
	ut.xcount = ut.mcount
	ut.moff = uint32(unsafe.Sizeof(uncommonType{}))

	if len(fs) > 0 {
		repr = append(repr, ' ')
	}
	repr = append(repr, '}')

	hash = fnv1(hash, '}')
	str := string(repr)

	// Round the size up to be a multiple of the alignment.
	size = align(size, uintptr(typalign))

	// Make the struct type.
	var istruct any = struct{}{}
	prototype := *(**structType)(unsafe.Pointer(&istruct))
	*typ = *prototype
	typ.fields = fs
	if pkgpath != "" {
		typ.pkgPath = newName(pkgpath, "", false)
	}

	// Look in cache.
	if ts, ok := structLookupCache.m.Load(hash); ok {
		for _, st := range ts.([]Type) {
			t := st.common()
			if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
				return t
			}
		}
	}

	// Not in cache, lock and retry.
	structLookupCache.Lock()
	defer structLookupCache.Unlock()
	if ts, ok := structLookupCache.m.Load(hash); ok {
		for _, st := range ts.([]Type) {
			t := st.common()
			if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
				return t
			}
		}
	}

	addToCache := func(t Type) Type {
		var ts []Type
		if ti, ok := structLookupCache.m.Load(hash); ok {
			ts = ti.([]Type)
		}
		structLookupCache.m.Store(hash, append(ts, t))
		return t
	}

	// Look in known types.
	for _, t := range typesByString(str) {
		if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
			// even if 't' wasn't a structType with methods, we should be ok
			// as the 'u uncommonType' field won't be accessed except when
			// tflag&tflagUncommon is set.
			return addToCache(t)
		}
	}

	typ.str = resolveReflectName(newName(str, "", false))
	typ.tflag = 0 // TODO: set tflagRegularMemory
	typ.hash = hash
	typ.size = size
	typ.ptrdata = typeptrdata(typ.common())
	typ.align = typalign
	typ.fieldAlign = typalign
	typ.ptrToThis = 0
	if len(methods) > 0 {
		typ.tflag |= tflagUncommon
	}

	if hasGCProg {
		lastPtrField := 0
		for i, ft := range fs {
			if ft.typ.pointers() {
				lastPtrField = i
			}
		}
		prog := []byte{0, 0, 0, 0} // will be length of prog
		var off uintptr
		for i, ft := range fs {
			if i > lastPtrField {
				// gcprog should not include anything for any field after
				// the last field that contains pointer data
				break
			}
			if !ft.typ.pointers() {
				// Ignore pointerless fields.
				continue
			}
			// Pad to start of this field with zeros.
			if ft.offset() > off {
				n := (ft.offset() - off) / goarch.PtrSize
				prog = append(prog, 0x01, 0x00) // emit a 0 bit
				if n > 1 {
					prog = append(prog, 0x81)      // repeat previous bit
					prog = appendVarint(prog, n-1) // n-1 times
				}
				off = ft.offset()
			}

			prog = appendGCProg(prog, ft.typ)
			off += ft.typ.ptrdata
		}
		prog = append(prog, 0)
		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
		typ.kind |= kindGCProg
		typ.gcdata = &prog[0]
	} else {
		typ.kind &^= kindGCProg
		bv := new(bitVector)
		addTypeBits(bv, 0, typ.common())
		if len(bv.data) > 0 {
			typ.gcdata = &bv.data[0]
		}
	}

	typ.equal = nil
	if comparable {
		typ.equal = func(p, q unsafe.Pointer) bool {
			for _, ft := range typ.fields {
				pi := add(p, ft.offset(), "&x.field safe")
				qi := add(q, ft.offset(), "&x.field safe")
				if !ft.typ.equal(pi, qi) {
					return false
				}
			}
			return true
		}
	}

	switch {
	case len(fs) == 1 && !ifaceIndir(fs[0].typ):
		// structs of 1 direct iface type can be direct
		typ.kind |= kindDirectIface
	default:
		typ.kind &^= kindDirectIface
	}

	return addToCache(&typ.rtype)
}

// runtimeStructField takes a StructField value passed to StructOf and
// returns both the corresponding internal representation, of type
// structField, and the pkgpath value to use for this field.
func runtimeStructField(field StructField) (structField, string) {
	if field.Anonymous && field.PkgPath != "" {
		panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set")
	}

	if field.IsExported() {
		// Best-effort check for misuse.
		// Since this field will be treated as exported, not much harm done if Unicode lowercase slips through.
		c := field.Name[0]
		if 'a' <= c && c <= 'z' || c == '_' {
			panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
		}
	}

	offsetEmbed := uintptr(0)
	if field.Anonymous {
		offsetEmbed |= 1
	}

	resolveReflectType(field.Type.common()) // install in runtime
	f := structField{
		name:        newName(field.Name, string(field.Tag), field.IsExported()),
		typ:         field.Type.common(),
		offsetEmbed: offsetEmbed,
	}
	return f, field.PkgPath
}
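// A brief usage sketch for StructOf (standalone program, not part of this
// package). It builds a struct type at run time; values of that type can
// then be created with New. Per the limitations documented above, all
// field names must be exported:
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	func main() {
//		t := reflect.StructOf([]reflect.StructField{
//			{Name: "Name", Type: reflect.TypeOf(""), Tag: `json:"name"`},
//			{Name: "Age", Type: reflect.TypeOf(0)},
//		})
//		v := reflect.New(t).Elem()
//		v.Field(0).SetString("Gopher")
//		v.Field(1).SetInt(11)
//		fmt.Printf("%+v\n", v.Interface()) // {Name:Gopher Age:11}
//	}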
// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
// keep in sync with ../cmd/compile/internal/reflectdata/reflect.go
func typeptrdata(t *rtype) uintptr {
	switch t.Kind() {
	case Struct:
		st := (*structType)(unsafe.Pointer(t))
		// find the last field that has pointers.
		field := -1
		for i := range st.fields {
			ft := st.fields[i].typ
			if ft.pointers() {
				field = i
			}
		}
		if field == -1 {
			return 0
		}
		f := st.fields[field]
		return f.offset() + f.typ.ptrdata

	default:
		panic("reflect.typeptrdata: unexpected type, " + t.String())
	}
}

// See cmd/compile/internal/reflectdata/reflect.go for derivation of constant.
const maxPtrmaskBytes = 2048

// ArrayOf returns the array type with the given length and element type.
// For example, if t represents int, ArrayOf(5, t) represents [5]int.
//
// If the resulting type would be larger than the available address space,
// ArrayOf panics.
func ArrayOf(length int, elem Type) Type {
	if length < 0 {
		panic("reflect: negative length passed to ArrayOf")
	}

	typ := elem.(*rtype)

	// Look in cache.
	ckey := cacheKey{Array, typ, nil, uintptr(length)}
	if array, ok := lookupCache.Load(ckey); ok {
		return array.(Type)
	}

	// Look in known types.
	s := "[" + strconv.Itoa(length) + "]" + typ.String()
	for _, tt := range typesByString(s) {
		array := (*arrayType)(unsafe.Pointer(tt))
		if array.elem == typ {
			ti, _ := lookupCache.LoadOrStore(ckey, tt)
			return ti.(Type)
		}
	}

	// Make an array type.
	var iarray any = [1]unsafe.Pointer{}
	prototype := *(**arrayType)(unsafe.Pointer(&iarray))
	array := *prototype
	array.tflag = typ.tflag & tflagRegularMemory
	array.str = resolveReflectName(newName(s, "", false))
	array.hash = fnv1(typ.hash, '[')
	for n := uint32(length); n > 0; n >>= 8 {
		array.hash = fnv1(array.hash, byte(n))
	}
	array.hash = fnv1(array.hash, ']')
	array.elem = typ
	array.ptrToThis = 0
	if typ.size > 0 {
		max := ^uintptr(0) / typ.size
		if uintptr(length) > max {
			panic("reflect.ArrayOf: array size would exceed virtual address space")
		}
	}
	array.size = typ.size * uintptr(length)
	if length > 0 && typ.ptrdata != 0 {
		array.ptrdata = typ.size*uintptr(length-1) + typ.ptrdata
	}
	array.align = typ.align
	array.fieldAlign = typ.fieldAlign
	array.len = uintptr(length)
	array.slice = SliceOf(elem).(*rtype)

	switch {
	case typ.ptrdata == 0 || array.size == 0:
		// No pointers.
		array.gcdata = nil
		array.ptrdata = 0

	case length == 1:
		// In memory, 1-element array looks just like the element.
		array.kind |= typ.kind & kindGCProg
		array.gcdata = typ.gcdata
		array.ptrdata = typ.ptrdata

	case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*goarch.PtrSize:
		// Element is small with pointer mask; array is still small.
		// Create direct pointer mask by turning each 1 bit in elem
		// into length 1 bits in larger mask.
		mask := make([]byte, (array.ptrdata/goarch.PtrSize+7)/8)
		emitGCMask(mask, 0, typ, array.len)
		array.gcdata = &mask[0]

	default:
		// Create program that emits one element
		// and then repeats to make the array.
		prog := []byte{0, 0, 0, 0} // will be length of prog
		prog = appendGCProg(prog, typ)
		// Pad from ptrdata to size.
		elemPtrs := typ.ptrdata / goarch.PtrSize
		elemWords := typ.size / goarch.PtrSize
		if elemPtrs < elemWords {
			// Emit literal 0 bit, then repeat as needed.
			prog = append(prog, 0x01, 0x00)
			if elemPtrs+1 < elemWords {
				prog = append(prog, 0x81)
				prog = appendVarint(prog, elemWords-elemPtrs-1)
			}
		}
		// Repeat length-1 times.
		if elemWords < 0x80 {
			prog = append(prog, byte(elemWords|0x80))
		} else {
			prog = append(prog, 0x80)
			prog = appendVarint(prog, elemWords)
		}
		prog = appendVarint(prog, uintptr(length)-1)
		prog = append(prog, 0)
		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
		array.kind |= kindGCProg
		array.gcdata = &prog[0]
		array.ptrdata = array.size // overestimate but ok; must match program
	}

	etyp := typ.common()
	esize := etyp.Size()

	array.equal = nil
	if eequal := etyp.equal; eequal != nil {
		array.equal = func(p, q unsafe.Pointer) bool {
			for i := 0; i < length; i++ {
				pi := arrayAt(p, i, esize, "i < length")
				qi := arrayAt(q, i, esize, "i < length")
				if !eequal(pi, qi) {
					return false
				}
			}
			return true
		}
	}

	switch {
	case length == 1 && !ifaceIndir(typ):
		// array of 1 direct iface type can be direct
		array.kind |= kindDirectIface
	default:
		array.kind &^= kindDirectIface
	}

	ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
	return ti.(Type)
}

func appendVarint(x []byte, v uintptr) []byte {
	for ; v >= 0x80; v >>= 7 {
		x = append(x, byte(v|0x80))
	}
	x = append(x, byte(v))
	return x
}
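// A brief usage sketch for ArrayOf (standalone program, not part of this
// package). Arrays built this way are comparable whenever their element
// type is comparable, so they can be used as map keys:
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	func main() {
//		t := reflect.ArrayOf(3, reflect.TypeOf(byte(0))) // [3]uint8
//		v := reflect.New(t).Elem()
//		v.Index(0).SetUint('G')
//		fmt.Println(t, v.Interface()) // [3]uint8 [71 0 0]
//	}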
// toType converts from a *rtype to a Type that can be returned
// to the client of package reflect. In gc, the only concern is that
// a nil *rtype must be replaced by a nil Type, but in gccgo this
// function takes care of ensuring that multiple *rtype for the same
// type are coalesced into a single Type.
func toType(t *rtype) Type {
	if t == nil {
		return nil
	}
	return t
}

type layoutKey struct {
	ftyp *funcType // function signature
	rcvr *rtype    // receiver type, or nil if none
}

type layoutType struct {
	t         *rtype
	framePool *sync.Pool
	abi       abiDesc
}

var layoutCache sync.Map // map[layoutKey]layoutType

// funcLayout computes a struct type representing the layout of the
// stack-assigned function arguments and return values for the function
// type t.
// If rcvr != nil, rcvr specifies the type of the receiver.
// The returned type exists only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in
// the name for possible debugging use.
func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, framePool *sync.Pool, abid abiDesc) {
	if t.Kind() != Func {
		panic("reflect: funcLayout of non-func type " + t.String())
	}
	if rcvr != nil && rcvr.Kind() == Interface {
		panic("reflect: funcLayout with interface receiver " + rcvr.String())
	}
	k := layoutKey{t, rcvr}
	if lti, ok := layoutCache.Load(k); ok {
		lt := lti.(layoutType)
		return lt.t, lt.framePool, lt.abi
	}

	// Compute the ABI layout.
	abid = newAbiDesc(t, rcvr)

	// build dummy rtype holding gc program
	x := &rtype{
		align: goarch.PtrSize,
		// Don't add spill space here; it's only necessary in
		// reflectcall's frame, not in the allocated frame.
		// TODO(mknyszek): Remove this comment when register
		// spill space in the frame is no longer required.
		size:    align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize),
		ptrdata: uintptr(abid.stackPtrs.n) * goarch.PtrSize,
	}
	if abid.stackPtrs.n > 0 {
		x.gcdata = &abid.stackPtrs.data[0]
	}

	var s string
	if rcvr != nil {
		s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")"
	} else {
		s = "funcargs(" + t.String() + ")"
	}
	x.str = resolveReflectName(newName(s, "", false))

	// cache result for future callers
	framePool = &sync.Pool{New: func() any {
		return unsafe_New(x)
	}}
	lti, _ := layoutCache.LoadOrStore(k, layoutType{
		t:         x,
		framePool: framePool,
		abi:       abid,
	})
	lt := lti.(layoutType)
	return lt.t, lt.framePool, lt.abi
}

// ifaceIndir reports whether t is stored indirectly in an interface value.
func ifaceIndir(t *rtype) bool {
	return t.kind&kindDirectIface == 0
}

// Note: this type must agree with runtime.bitvector.
type bitVector struct {
	n    uint32 // number of bits
	data []byte
}

// append a bit to the bitmap.
func (bv *bitVector) append(bit uint8) {
	if bv.n%8 == 0 {
		bv.data = append(bv.data, 0)
	}
	bv.data[bv.n/8] |= bit << (bv.n % 8)
	bv.n++
}

func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
	if t.ptrdata == 0 {
		return
	}

	switch Kind(t.kind & kindMask) {
	case Chan, Func, Map, Pointer, Slice, String, UnsafePointer:
		// 1 pointer at start of representation
		for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
			bv.append(0)
		}
		bv.append(1)

	case Interface:
		// 2 pointers
		for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
			bv.append(0)
		}
		bv.append(1)
		bv.append(1)

	case Array:
		// repeat inner type
		tt := (*arrayType)(unsafe.Pointer(t))
		for i := 0; i < int(tt.len); i++ {
			addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
		}

	case Struct:
		// apply fields
		tt := (*structType)(unsafe.Pointer(t))
		for i := range tt.fields {
			f := &tt.fields[i]
			addTypeBits(bv, offset+f.offset(), f.typ)
		}
	}
}
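// The pointer bitmaps built by bitVector and addTypeBits mark which
// pointer-sized words of a value the garbage collector must scan. The
// following standalone sketch (illustrative only; the bits type and
// appendBit are hypothetical names, not part of this package) shows the
// same bit-packing for a struct whose first and third words are pointers:
//
//	package main
//
//	import "fmt"
//
//	type bits struct {
//		n    uint32 // number of bits
//		data []byte
//	}
//
//	func (b *bits) appendBit(bit uint8) {
//		if b.n%8 == 0 {
//			b.data = append(b.data, 0)
//		}
//		b.data[b.n/8] |= bit << (b.n % 8)
//		b.n++
//	}
//
//	func main() {
//		// struct{ p *int; n int; q *int }: words 0 and 2 hold pointers.
//		var b bits
//		for _, bit := range []uint8{1, 0, 1} {
//			b.appendBit(bit)
//		}
//		fmt.Printf("%08b\n", b.data[0]) // 00000101
//	}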