// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Indexed package export.
//
// The indexed export data format is an evolution of the previous
// binary export data format. Its chief contribution is introducing an
// index table, which allows efficient random access of individual
// declarations and inline function bodies. In turn, this allows
// avoiding unnecessary work for compilation units that import large
// packages.
//
//
// The top-level data format is structured as:
//
//	Header struct {
//		Tag        byte // 'i'
//		Version    uvarint
//		StringSize uvarint
//		DataSize   uvarint
//	}
//
//	Strings [StringSize]byte
//	Data    [DataSize]byte
//
//	MainIndex []struct{
//		PkgPath   stringOff
//		PkgName   stringOff
//		PkgHeight uvarint
//
//		Decls []struct{
//			Name   stringOff
//			Offset declOff
//		}
//	}
//
//	Fingerprint [8]byte
//
// uvarint means a uint64 written out using uvarint encoding.
//
// []T means a uvarint followed by that many T objects. In other
// words:
//
//	Len   uvarint
//	Elems [Len]T
//
// stringOff means a uvarint that indicates an offset within the
// Strings section. At that offset is another uvarint, followed by
// that many bytes, which form the string value.
//
// declOff means a uvarint that indicates an offset within the Data
// section where the associated declaration can be found.
//
//
// There are five kinds of declarations, distinguished by their first
// byte:
//
//	type Var struct {
//		Tag  byte // 'V'
//		Pos  Pos
//		Type typeOff
//	}
//
//	type Func struct {
//		Tag        byte // 'F' or 'G'
//		Pos        Pos
//		TypeParams []typeOff // only present if Tag == 'G'
//		Signature  Signature
//	}
//
//	type Const struct {
//		Tag   byte // 'C'
//		Pos   Pos
//		Value Value
//	}
//
//	type Type struct {
//		Tag        byte // 'T' or 'U'
//		Pos        Pos
//		TypeParams []typeOff // only present if Tag == 'U'
//		Underlying typeOff
//
//		Methods []struct{ // omitted if Underlying is an interface type
//			Pos       Pos
//			Name      stringOff
//			Recv      Param
//			Signature Signature
//		}
//	}
//
//	type Alias struct {
//		Tag        byte // 'A' or 'B'
//		Pos        Pos
//		TypeParams []typeOff // only present if Tag == 'B'
//		Type       typeOff
//	}
//
//	// "Automatic" declaration of each typeparam
//	type TypeParam struct {
//		Tag        byte // 'P'
//		Pos        Pos
//		Implicit   bool
//		Constraint typeOff
//	}
//
// typeOff means a uvarint that either indicates a predeclared type,
// or an offset into the Data section. If the uvarint is less than
// predeclReserved, then it indicates the index into the predeclared
// types list (see predeclared in bexport.go for order). Otherwise,
// subtracting predeclReserved yields the offset of a type descriptor.
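//
// As a non-normative illustration (not part of the format above), a reader
// might resolve a typeOff roughly as follows; the names r, predecl, and
// typeAt are hypothetical and do not exist in this package:
//
//	off := r.uint64() // read the uvarint
//	if off < predeclReserved {
//		t = predecl[off] // index into the predeclared types list
//	} else {
//		t = typeAt(off - predeclReserved) // type descriptor in the Data section
//	}
//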
// Value means a type, kind, and type-specific value. See
// (*exportWriter).value for details.
//
//
// There are twelve kinds of type descriptors, distinguished by an itag:
//
//	type DefinedType struct {
//		Tag     itag // definedType
//		Name    stringOff
//		PkgPath stringOff
//	}
//
//	type PointerType struct {
//		Tag  itag // pointerType
//		Elem typeOff
//	}
//
//	type SliceType struct {
//		Tag  itag // sliceType
//		Elem typeOff
//	}
//
//	type ArrayType struct {
//		Tag  itag // arrayType
//		Len  uint64
//		Elem typeOff
//	}
//
//	type ChanType struct {
//		Tag  itag // chanType
//		Dir  uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv
//		Elem typeOff
//	}
//
//	type MapType struct {
//		Tag  itag // mapType
//		Key  typeOff
//		Elem typeOff
//	}
//
//	type FuncType struct {
//		Tag       itag // signatureType
//		PkgPath   stringOff
//		Signature Signature
//	}
//
//	type StructType struct {
//		Tag     itag // structType
//		PkgPath stringOff
//		Fields []struct {
//			Pos      Pos
//			Name     stringOff
//			Type     typeOff
//			Embedded bool
//			Note     stringOff
//		}
//	}
//
//	type InterfaceType struct {
//		Tag     itag // interfaceType
//		PkgPath stringOff
//		Embeddeds []struct {
//			Pos  Pos
//			Type typeOff
//		}
//		Methods []struct {
//			Pos       Pos
//			Name      stringOff
//			Signature Signature
//		}
//	}
//
//	// Reference to a type param declaration
//	type TypeParamType struct {
//		Tag     itag // typeParamType
//		Name    stringOff
//		PkgPath stringOff
//	}
//
//	// Instantiation of a generic type (like List[T2] or List[int])
//	type InstanceType struct {
//		Tag      itag // instanceType
//		Pos      Pos
//		TypeArgs []typeOff
//		BaseType typeOff
//	}
//
//	type UnionType struct {
//		Tag itag // unionType
//		Terms []struct {
//			tilde bool
//			Type  typeOff
//		}
//	}
//
//
//	type Signature struct {
//		Params   []Param
//		Results  []Param
//		Variadic bool // omitted if Params is empty
//	}
//
//	type Param struct {
//		Pos  Pos
//		Name stringOff
//		Type typeOff
//	}
//
//
// Pos encodes a file:line:column triple, incorporating a simple delta
// encoding scheme within a data object. See exportWriter.pos for
// details.

package gcimporter

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"go/constant"
	"go/token"
	"go/types"
	"io"
	"math/big"
	"reflect"
	"slices"
	"sort"
	"strconv"
	"strings"

	"golang.org/x/tools/go/types/objectpath"
	"golang.org/x/tools/internal/aliases"
)
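// The sketch below is illustrative only and is not used by this package; it
// shows how the fixed part of the layout documented above could be consumed
// with encoding/binary, assuming r is a hypothetical *bufio.Reader positioned
// just after the 'i' tag byte:
//
//	version, _ := binary.ReadUvarint(r)    // Header.Version
//	stringSize, _ := binary.ReadUvarint(r) // Header.StringSize
//	dataSize, _ := binary.ReadUvarint(r)   // Header.DataSize
//	strs := make([]byte, stringSize)       // Strings section
//	_, _ = io.ReadFull(r, strs)
//	data := make([]byte, dataSize)         // Data section
//	_, _ = io.ReadFull(r, data)
//
// Offsets such as stringOff and declOff are then interpreted relative to
// these two byte slices.
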
// IExportShallow encodes "shallow" export data for the specified package.
//
// For types, we use "shallow" export data. Historically, the Go
// compiler always produced a summary of the types for a given package
// that included types from other packages that it indirectly
// referenced: "deep" export data. This had the advantage that the
// compiler (and analogous tools such as gopls) need only load one
// file per direct import. However, it meant that the files tended to
// get larger based on the level of the package in the import
// graph. For example, higher-level packages in the kubernetes module
// have over 1MB of "deep" export data, even when they have almost no
// content of their own, merely because they mention a major type that
// references many others. In pathological cases the export data was
// 300x larger than the source for a package due to this quadratic
// growth.
//
// "Shallow" export data means that the serialized types describe only
// a single package. If those types mention types from other packages,
// the type checker may need to request additional packages beyond
// just the direct imports. Type information for the entire transitive
// closure of imports is provided (lazily) by the DAG.
//
// No promises are made about the encoding other than that it can be decoded by
// the same version of IExportShallow. If you plan to save export data in the
// file system, be sure to include a cryptographic digest of the executable in
// the key to avoid version skew.
//
// If the provided reportf func is non-nil, it is used for reporting
// bugs (e.g. recovered panics) encountered during export, enabling us
// to obtain via telemetry the stack that would otherwise be lost by
// merely returning an error.
func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) {
	// In principle this operation can only fail if out.Write fails,
	// but that's impossible for bytes.Buffer---and as a matter of
	// fact iexportCommon doesn't even check for I/O errors.
	// TODO(adonovan): handle I/O errors properly.
	// TODO(adonovan): use byte slices throughout, avoiding copying.
	const bundle, shallow = false, true
	var out bytes.Buffer
	err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}, reportf)
	return out.Bytes(), err
}

// IImportShallow decodes "shallow" types.Package data encoded by
// [IExportShallow] in the same executable. This function cannot import data
// from cmd/compile or gcexportdata.Write.
//
// The importer calls getPackages to obtain package symbols for all
// packages mentioned in the export data, including the one being
// decoded.
//
// If the provided reportf func is non-nil, it will be used for reporting bugs
// encountered during import.
// TODO(rfindley): remove reportf when we are confident enough in the new
// objectpath encoding.
func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) {
	const bundle = false
	const shallow = true
	pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf)
	if err != nil {
		return nil, err
	}
	return pkgs[0], nil
}

// ReportFunc is the type of a function used to report formatted bugs.
type ReportFunc = func(string, ...any)

// Current bundled export format version. Increase with each format change.
// 0: initial implementation
const bundleVersion = 0

// IExportData writes indexed export data for pkg to out.
//
// If no file set is provided, position info will be missing.
// The package path of the top-level package will not be recorded,
// so that calls to IImportData can override with a provided package path.
func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
	const bundle, shallow = false, false
	return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}, nil)
}

// IExportBundle writes an indexed export bundle for pkgs to out.
func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
	const bundle, shallow = true, false
	return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs, nil)
}
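// A minimal usage sketch (illustrative only; fset and pkg are assumed to come
// from an earlier go/types type-checking pass):
//
//	var buf bytes.Buffer
//	if err := IExportData(&buf, fset, pkg); err != nil {
//		log.Fatal(err)
//	}
//	// buf.Bytes() now holds indexed export data for pkg, which the
//	// corresponding importer (IImportData, in iimport.go) can decode.
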
func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package, reportf ReportFunc) (err error) {
	if !debug {
		defer func() {
			if e := recover(); e != nil {
				// Report the stack via telemetry (see #71067).
				if reportf != nil {
					reportf("panic in exporter")
				}
				if ierr, ok := e.(internalError); ok {
					// internalError usually means we exported a
					// bad go/types data structure: a violation
					// of an implicit precondition of Export.
					err = ierr
					return
				}
				// Not an internal error; panic again.
				panic(e)
			}
		}()
	}

	p := iexporter{
		fset:        fset,
		version:     version,
		shallow:     shallow,
		allPkgs:     map[*types.Package]bool{},
		stringIndex: map[string]uint64{},
		declIndex:   map[types.Object]uint64{},
		tparamNames: map[types.Object]string{},
		typIndex:    map[types.Type]uint64{},
	}
	if !bundle {
		p.localpkg = pkgs[0]
	}

	for i, pt := range predeclared() {
		p.typIndex[pt] = uint64(i)
	}
	if len(p.typIndex) > predeclReserved {
		panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
	}

	// Initialize work queue with exported declarations.
	for _, pkg := range pkgs {
		scope := pkg.Scope()
		for _, name := range scope.Names() {
			if token.IsExported(name) {
				p.pushDecl(scope.Lookup(name))
			}
		}

		if bundle {
			// Ensure pkg and its imports are included in the index.
			p.allPkgs[pkg] = true
			for _, imp := range pkg.Imports() {
				p.allPkgs[imp] = true
			}
		}
	}

	// Loop until no more work.
	for !p.declTodo.empty() {
		p.doDecl(p.declTodo.popHead())
	}

	// Produce index of offset of each file record in files.
	var files intWriter
	var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i
	if p.shallow {
		fileOffset = make([]uint64, len(p.fileInfos))
		for i, info := range p.fileInfos {
			fileOffset[i] = uint64(files.Len())
			p.encodeFile(&files, info.file, info.needed)
		}
	}

	// Append indices to data0 section.
	dataLen := uint64(p.data0.Len())
	w := p.newWriter()
	w.writeIndex(p.declIndex)

	if bundle {
		w.uint64(uint64(len(pkgs)))
		for _, pkg := range pkgs {
			w.pkg(pkg)
			imps := pkg.Imports()
			w.uint64(uint64(len(imps)))
			for _, imp := range imps {
				w.pkg(imp)
			}
		}
	}
	w.flush()

	// Assemble header.
	var hdr intWriter
	if bundle {
		hdr.uint64(bundleVersion)
	}
	hdr.uint64(uint64(p.version))
	hdr.uint64(uint64(p.strings.Len()))
	if p.shallow {
		hdr.uint64(uint64(files.Len()))
		hdr.uint64(uint64(len(fileOffset)))
		for _, offset := range fileOffset {
			hdr.uint64(offset)
		}
	}
	hdr.uint64(dataLen)

	// Flush output.
	io.Copy(out, &hdr)
	io.Copy(out, &p.strings)
	if p.shallow {
		io.Copy(out, &files)
	}
	io.Copy(out, &p.data0)

	return nil
}

// encodeFile writes to w a representation of the file sufficient to
// faithfully restore position information about all needed offsets.
// Mutates the needed array.
func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) {
	_ = needed[0] // precondition: needed is non-empty

	w.uint64(p.stringOff(file.Name()))

	size := uint64(file.Size())
	w.uint64(size)

	// Sort the set of needed offsets. Duplicates are harmless.
	slices.Sort(needed)

	lines := file.Lines() // byte offset of each line start
	w.uint64(uint64(len(lines)))

	// Rather than record the entire array of line start offsets,
	// we save only a sparse list of (index, offset) pairs for
	// the start of each line that contains a needed position.
	var sparse [][2]int // (index, offset) pairs
outer:
	for i, lineStart := range lines {
		lineEnd := size
		if i < len(lines)-1 {
			lineEnd = uint64(lines[i+1])
		}
		// Does this line contain a needed offset?
		if needed[0] < lineEnd {
			sparse = append(sparse, [2]int{i, lineStart})
			for needed[0] < lineEnd {
				needed = needed[1:]
				if len(needed) == 0 {
					break outer
				}
			}
		}
	}

	// Delta-encode the columns.
	w.uint64(uint64(len(sparse)))
	var prev [2]int
	for _, next := range sparse {
		w.uint64(uint64(next[0] - prev[0]))
		w.uint64(uint64(next[1] - prev[1]))
		prev = next
	}
}

// writeIndex writes out an object index. mainIndex indicates whether
// we're writing out the main index, which is also read by
// non-compiler tools and includes a complete package description
// (i.e., name and height).
func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
	type pkgObj struct {
		obj  types.Object
		name string // qualified name; differs from obj.Name for type params
	}
	// Build a map from packages to objects from that package.
	pkgObjs := map[*types.Package][]pkgObj{}

	// For the main index, make sure to include every package that
	// we reference, even if we're not exporting (or reexporting)
	// any symbols from it.
	if w.p.localpkg != nil {
		pkgObjs[w.p.localpkg] = nil
	}
	for pkg := range w.p.allPkgs {
		pkgObjs[pkg] = nil
	}

	for obj := range index {
		name := w.p.exportName(obj)
		pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name})
	}

	var pkgs []*types.Package
	for pkg, objs := range pkgObjs {
		pkgs = append(pkgs, pkg)

		sort.Slice(objs, func(i, j int) bool {
			return objs[i].name < objs[j].name
		})
	}

	sort.Slice(pkgs, func(i, j int) bool {
		return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j])
	})

	w.uint64(uint64(len(pkgs)))
	for _, pkg := range pkgs {
		w.string(w.exportPath(pkg))
		w.string(pkg.Name())
		w.uint64(uint64(0)) // package height is not needed for go/types

		objs := pkgObjs[pkg]
		w.uint64(uint64(len(objs)))
		for _, obj := range objs {
			w.string(obj.name)
			w.uint64(index[obj.obj])
		}
	}
}
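// A worked example of encodeFile's sparse line table (illustrative values):
// for a file whose line starts are [0, 10, 25, 40] and whose needed offsets
// are {12, 14, 30}, only lines 1 and 2 contain needed offsets, so:
//
//	lines  = [0, 10, 25, 40]     // byte offset of each line start
//	needed = {12, 14, 30}        // offsets that must be restored faithfully
//	sparse = [(1, 10), (2, 25)]  // (index, offset) pairs actually recorded
//	output = 2, (1, 10), (1, 15) // count, then delta-encoded pairs
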
// exportName returns the 'exported' name of an object. It differs from
// obj.Name() only for type parameters (see tparamExportName for details).
func (p *iexporter) exportName(obj types.Object) (res string) {
	if name := p.tparamNames[obj]; name != "" {
		return name
	}
	return obj.Name()
}

type iexporter struct {
	fset    *token.FileSet
	out     *bytes.Buffer
	version int

	shallow    bool                // don't put types from other packages in the index
	objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated
	localpkg   *types.Package      // (nil in bundle mode)

	// allPkgs tracks all packages that have been referenced by
	// the export data, so we can ensure to include them in the
	// main index.
	allPkgs map[*types.Package]bool

	declTodo objQueue

	strings     intWriter
	stringIndex map[string]uint64

	// In shallow mode, object positions are encoded as (file, offset).
	// Each file is recorded as a line-number table.
	// Only the lines of needed positions are saved faithfully.
	fileInfo  map[*token.File]uint64 // value is index in fileInfos
	fileInfos []*filePositions

	data0       intWriter
	declIndex   map[types.Object]uint64
	tparamNames map[types.Object]string // typeparam->exported name
	typIndex    map[types.Type]uint64

	indent int // for tracing support
}

type filePositions struct {
	file   *token.File
	needed []uint64 // unordered list of needed file offsets
}

func (p *iexporter) trace(format string, args ...any) {
	if !trace {
		// Call sites should also be guarded, but having this check here allows
		// easily enabling/disabling debug trace statements.
		return
	}
	fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
}

// objectpathEncoder returns the lazily allocated objectpath.Encoder to use
// when encoding objects in other packages during shallow export.
//
// Using a shared Encoder amortizes some of the cost of objectpath search.
func (p *iexporter) objectpathEncoder() *objectpath.Encoder {
	if p.objEncoder == nil {
		p.objEncoder = new(objectpath.Encoder)
	}
	return p.objEncoder
}

// stringOff returns the offset of s within the string section.
// If not already present, it's added to the end.
func (p *iexporter) stringOff(s string) uint64 {
	off, ok := p.stringIndex[s]
	if !ok {
		off = uint64(p.strings.Len())
		p.stringIndex[s] = off

		p.strings.uint64(uint64(len(s)))
		p.strings.WriteString(s)
	}
	return off
}

// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it.
func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) {
	index, ok := p.fileInfo[file]
	if !ok {
		index = uint64(len(p.fileInfo))
		p.fileInfos = append(p.fileInfos, &filePositions{file: file})
		if p.fileInfo == nil {
			p.fileInfo = make(map[*token.File]uint64)
		}
		p.fileInfo[file] = index
	}
	// Record each needed offset.
	info := p.fileInfos[index]
	offset := uint64(file.Offset(pos))
	info.needed = append(info.needed, offset)

	return index, offset
}

// pushDecl adds n to the declaration work queue, if not already present.
func (p *iexporter) pushDecl(obj types.Object) {
	// Package unsafe is known to the compiler and predeclared.
// Caller should not ask us to do export it.if .Pkg() == types.Unsafe {panic("cannot export package unsafe") }// Shallow export data: don't index decls from other packages.if .shallow && .Pkg() != .localpkg {return }if , := .declIndex[]; {return } .declIndex[] = ^uint64(0) // mark obj present in work queue .declTodo.pushTail()}// exportWriter handles writing out individual data section chunks.typeexportWriterstruct {p *iexporterdataintWriterprevFilestringprevLineint64prevColumnint64}func ( *exportWriter) ( *types.Package) string {if == .p.localpkg {return"" }return .Path()}func ( *iexporter) ( types.Object) {iftrace { .trace("exporting decl %v (%T)", , ) .indent++deferfunc() { .indent-- .trace("=> %s", ) }() } := .newWriter()switch obj := .(type) {case *types.Var: .tag(varTag) .pos(.Pos()) .typ(.Type(), .Pkg())case *types.Func: , := .Type().(*types.Signature)if .Recv() != nil {// We shouldn't see methods in the package scope, // but the type checker may repair "func () F() {}" // to "func (Invalid) F()" and then treat it like "func F()", // so allow that. See golang/go#57729.if .Recv().Type() != types.Typ[types.Invalid] {panic(internalErrorf("unexpected method: %v", )) } }// Function.if .TypeParams().Len() == 0 { .tag(funcTag) } else { .tag(genericFuncTag) } .pos(.Pos())// The tparam list of the function type is the declaration of the type // params. So, write out the type params right now. Then those type params // will be referenced via their type offset (via typOff) in all other // places in the signature and function where they are used. // // While importing the type parameters, tparamList computes and records // their export name, so that it can be later used when writing the index.if := .TypeParams(); .Len() > 0 { .tparamList(.Name(), , .Pkg()) } .signature()case *types.Const: .tag(constTag) .pos(.Pos()) .value(.Type(), .Val())case *types.TypeName: := .Type()if , := types.Unalias().(*types.TypeParam); { .tag(typeParamTag) .pos(.Pos()) := .Constraint()if .version >= iexportVersionGo1_18 { := falseif , := types.Unalias().(*types.Interface); != nil { = .IsImplicit() } .bool() } .typ(, .Pkg())break }if .IsAlias() { , := .(*types.Alias) // may fail when aliases are not enabledvar *types.TypeParamListif { = aliases.TypeParams() }if .Len() == 0 { .tag(aliasTag) } else { .tag(genericAliasTag) } .pos(.Pos())if .Len() > 0 { .tparamList(.Name(), , .Pkg()) }if {// Preserve materialized aliases, // even of non-exported types. = aliases.Rhs() } .typ(, .Pkg())break }// Defined type. , := .(*types.Named)if ! {panic(internalErrorf("%s is not a defined type", )) }if .TypeParams().Len() == 0 { .tag(typeTag) } else { .tag(genericTypeTag) } .pos(.Pos())if .TypeParams().Len() > 0 {// While importing the type parameters, tparamList computes and records // their export name, so that it can be later used when writing the index. .tparamList(.Name(), .TypeParams(), .Pkg()) } := .Underlying() .typ(, .Pkg())iftypes.IsInterface() {break } := .NumMethods() .uint64(uint64())for := range { := .Method() .pos(.Pos()) .string(.Name()) , := .Type().(*types.Signature)// Receiver type parameters are type arguments of the receiver type, so // their name must be qualified before exporting recv.if := .RecvTypeParams(); .Len() > 0 { := .Name() + "." 
+ .Name()for := 0; < .Len(); ++ { := .At() := tparamExportName(, ) .p.tparamNames[.Obj()] = } } .param(.Recv()) .signature() }default:panic(internalErrorf("unexpected object: %v", )) } .declIndex[] = .flush()}func ( *exportWriter) ( byte) { .data.WriteByte()}func ( *exportWriter) ( token.Pos) {if .p.shallow { .posV2() } elseif .p.version >= iexportVersionPosCol { .posV1() } else { .posV0() }}// posV2 encoding (used only in shallow mode) records positions as// (file, offset), where file is the index in the token.File table// (which records the file name and newline offsets) and offset is a// byte offset. It effectively ignores //line directives.func ( *exportWriter) ( token.Pos) {if == token.NoPos { .uint64(0)return } := .p.fset.File() // fset must be non-nil , := .p.fileIndexAndOffset(, ) .uint64(1 + ) .uint64()}func ( *exportWriter) ( token.Pos) {if .p.fset == nil { .int64(0)return } := .p.fset.Position() := .Filename := int64(.Line) := int64(.Column) := ( - .prevColumn) << 1 := ( - .prevLine) << 1if != .prevFile { |= 1 }if != 0 { |= 1 } .int64()if &1 != 0 { .int64()if &1 != 0 { .string() } } .prevFile = .prevLine = .prevColumn = }func ( *exportWriter) ( token.Pos) {if .p.fset == nil { .int64(0)return } := .p.fset.Position() := .Filename := int64(.Line)// When file is the same as the last position (common case), // we can save a few bytes by delta encoding just the line // number. // // Note: Because data objects may be read out of order (or not // at all), we can only apply delta encoding within a single // object. This is handled implicitly by tracking prevFile and // prevLine as fields of exportWriter.if == .prevFile { := - .prevLine .int64()if == deltaNewFile { .int64(-1) } } else { .int64(deltaNewFile) .int64() // line >= 0 .string() .prevFile = } .prevLine = }func ( *exportWriter) ( *types.Package) {// Ensure any referenced packages are declared in the main index. .p.allPkgs[] = true .string(.exportPath())}func ( *exportWriter) ( *types.TypeName) { := .p.exportName()// Ensure any referenced declarations are written out too. .p.pushDecl() .string() .pkg(.Pkg())}// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass// it in explicitly into signatures and structs that may use it for// constructing fields.func ( *exportWriter) ( types.Type, *types.Package) { .data.uint64(.p.typOff(, ))}func ( *iexporter) () *exportWriter {return &exportWriter{p: }}func ( *exportWriter) () uint64 { := uint64(.p.data0.Len())io.Copy(&.p.data0, &.data)return}func ( *iexporter) ( types.Type, *types.Package) uint64 { , := .typIndex[]if ! { := .newWriter() .doTyp(, ) = predeclReserved + .flush() .typIndex[] = }return}func ( *exportWriter) ( itag) { .data.uint64(uint64())}func ( *exportWriter) ( types.Type, *types.Package) {iftrace { .p.trace("exporting type %s (%T)", , ) .p.indent++deferfunc() { .p.indent-- .p.trace("=> %s", ) }() }switch t := .(type) {case *types.Alias:if := aliases.TypeArgs(); .Len() > 0 { .startType(instanceType) .pos(.Obj().Pos()) .typeList(, ) .typ(aliases.Origin(), )return } .startType(aliasType) .qualifiedType(.Obj())case *types.Named:if := .TypeArgs(); .Len() > 0 { .startType(instanceType)// TODO(rfindley): investigate if this position is correct, and if it // matters. 
.pos(.Obj().Pos()) .typeList(, ) .typ(.Origin(), )return } .startType(definedType) .qualifiedType(.Obj())case *types.TypeParam: .startType(typeParamType) .qualifiedType(.Obj())case *types.Pointer: .startType(pointerType) .typ(.Elem(), )case *types.Slice: .startType(sliceType) .typ(.Elem(), )case *types.Array: .startType(arrayType) .uint64(uint64(.Len())) .typ(.Elem(), )case *types.Chan: .startType(chanType)// 1 RecvOnly; 2 SendOnly; 3 SendRecvvaruint64switch .Dir() {casetypes.RecvOnly: = 1casetypes.SendOnly: = 2casetypes.SendRecv: = 3 } .uint64() .typ(.Elem(), )case *types.Map: .startType(mapType) .typ(.Key(), ) .typ(.Elem(), )case *types.Signature: .startType(signatureType) .pkg() .signature()case *types.Struct: .startType(structType) := .NumFields()// Even for struct{} we must emit some qualifying package, because that's // what the compiler does, and thus that's what the importer expects. := if > 0 { = .Field(0).Pkg() }if == nil {// TODO(rfindley): improve this very hacky logic. // // The importer expects a package to be set for all struct types, even // those with no fields. A better encoding might be to set NumFields // before pkg. setPkg panics with a nil package, which may be possible // to reach with invalid packages (and perhaps valid packages, too?), so // (arbitrarily) set the localpkg if available. // // Alternatively, we may be able to simply guarantee that pkg != nil, by // reconsidering the encoding of constant values.if .p.shallow { = .p.localpkg } else {panic(internalErrorf("no package to set for empty struct")) } } .pkg() .uint64(uint64())for := range { := .Field()if .p.shallow { .objectPath() } .pos(.Pos()) .string(.Name()) // unexported fields implicitly qualified by prior setPkg .typ(.Type(), ) .bool(.Anonymous()) .string(.Tag()) // note (or tag) }case *types.Interface: .startType(interfaceType) .pkg() := .NumEmbeddeds() .uint64(uint64())for := 0; < ; ++ { := .EmbeddedType() := if , := types.Unalias().(*types.Named); != nil { .pos(.Obj().Pos()) } else { .pos(token.NoPos) } .typ(, ) }// See comment for struct fields. In shallow mode we change the encoding // for interface methods that are promoted from other packages. = .NumExplicitMethods() .uint64(uint64())for := 0; < ; ++ { := .ExplicitMethod()if .p.shallow { .objectPath() } .pos(.Pos()) .string(.Name()) , := .Type().(*types.Signature) .signature() }case *types.Union: .startType(unionType) := .Len() .uint64(uint64())for := range { := .Term() .bool(.Tilde()) .typ(.Type(), ) }default:panic(internalErrorf("unexpected type: %v, %v", , reflect.TypeOf())) }}// objectPath writes the package and objectPath to use to look up obj in a// different package, when encoding in "shallow" mode.//// When doing a shallow import, the importer creates only the local package,// and requests package symbols for dependencies from the client.// However, certain types defined in the local package may hold objects defined// (perhaps deeply) within another package.//// For example, consider the following://// package a// func F() chan * map[string] struct { X int }//// package b// import "a"// var B = a.F()//// In this example, the type of b.B holds fields defined in package a.// In order to have the correct canonical objects for the field defined in the// type of B, they are encoded as objectPaths and later looked up in the// importer. The same problem applies to interface methods.func ( *exportWriter) ( types.Object) {if .Pkg() == nil || .Pkg() == .p.localpkg {// obj.Pkg() may be nil for the builtin error.Error. 
// In this case, or if obj is declared in the local package, no need to // encode. .string("")return } , := .p.objectpathEncoder().For()if != nil {// Fall back to the empty string, which will cause the importer to create a // new object, which matches earlier behavior. Creating a new object is // sufficient for many purposes (such as type checking), but causes certain // references algorithms to fail (golang/go#60819). However, we didn't // notice this problem during months of gopls@v0.12.0 testing. // // TODO(golang/go#61674): this workaround is insufficient, as in the case // where the field forwarded from an instantiated type that may not appear // in the export data of the original package: // // // package a // type A[P any] struct{ F P } // // // package b // type B a.A[int] // // We need to update references algorithms not to depend on this // de-duplication, at which point we may want to simply remove the // workaround here. .string("")return } .string(string()) .pkg(.Pkg())}func ( *exportWriter) ( *types.Signature) { .paramList(.Params()) .paramList(.Results())if .Params().Len() > 0 { .bool(.Variadic()) }}func ( *exportWriter) ( *types.TypeList, *types.Package) { .uint64(uint64(.Len()))for := 0; < .Len(); ++ { .typ(.At(), ) }}func ( *exportWriter) ( string, *types.TypeParamList, *types.Package) { := uint64(.Len()) .uint64()for := 0; < .Len(); ++ { := .At()// Set the type parameter exportName before exporting its type. := tparamExportName(, ) .p.tparamNames[.Obj()] = .typ(.At(), ) }}constblankMarker = "$"// tparamExportName returns the 'exported' name of a type parameter, which// differs from its actual object name: it is prefixed with a qualifier, and// blank type parameter names are disambiguated by their index in the type// parameter list.func ( string, *types.TypeParam) string {assert( != "") := .Obj().Name()if == "_" { = blankMarker + strconv.Itoa(.Index()) }return + "." + }// tparamName returns the real name of a type parameter, after stripping its// qualifying prefix and reverting blank-name encoding. See tparamExportName// for details.func ( string) string {// Remove the "path" from the type param name that makes it unique. := strings.LastIndex(, ".")if < 0 {errorf("malformed type parameter export name %s: missing prefix", ) } := [+1:]ifstrings.HasPrefix(, blankMarker) {return"_" }return}func ( *exportWriter) ( *types.Tuple) { := .Len() .uint64(uint64())for := range { .param(.At()) }}func ( *exportWriter) ( types.Object) { .pos(.Pos()) .localIdent() .typ(.Type(), .Pkg())}func ( *exportWriter) ( types.Type, constant.Value) { .typ(, nil)if .p.version >= iexportVersionGo1_18 { .int64(int64(.Kind())) }if .Kind() == constant.Unknown {// golang/go#60605: treat unknown constant values as if they have invalid type // // This loses some fidelity over the package type-checked from source, but that // is acceptable. 
// // TODO(rfindley): we should switch on the recorded constant kind rather // than the constant typereturn }switch := .Underlying().(*types.Basic); .Info() & types.IsConstType {casetypes.IsBoolean: .bool(constant.BoolVal())casetypes.IsInteger:varbig.Intif , := constant.Int64Val(); { .SetInt64() } elseif , := constant.Uint64Val(); { .SetUint64() } else { .SetString(.ExactString(), 10) } .mpint(&, )casetypes.IsFloat: := constantToFloat() .mpfloat(, )casetypes.IsComplex: .mpfloat(constantToFloat(constant.Real()), ) .mpfloat(constantToFloat(constant.Imag()), )casetypes.IsString: .string(constant.StringVal())default:if .Kind() == types.Invalid {// package contains type errorsbreak }panic(internalErrorf("unexpected type %v (%v)", , .Underlying())) }}// constantToFloat converts a constant.Value with kind constant.Float to a// big.Float.func ( constant.Value) *big.Float { = constant.ToFloat()// Use the same floating-point precision (512) as cmd/compile // (see Mpprec in cmd/compile/internal/gc/mpfloat.go).const = 512varbig.Float .SetPrec()if , := constant.Float64Val(); {// float64 .SetFloat64() } elseif , := constant.Num(), constant.Denom(); .Kind() == constant.Int {// TODO(gri): add big.Rat accessor to constant.Value. := valueToRat() := valueToRat() .SetRat(.Quo(, )) } else {// Value too large to represent as a fraction => inaccessible. // TODO(gri): add big.Float accessor to constant.Value. , := .SetString(.ExactString())assert() }return &}func ( constant.Value) *big.Rat {// Convert little-endian to big-endian. // I can't believe this is necessary. := constant.Bytes()for := 0; < len()/2; ++ { [], [len()-1-] = [len()-1-], [] }returnnew(big.Rat).SetInt(new(big.Int).SetBytes())}// mpint exports a multi-precision integer.//// For unsigned types, small values are written out as a single// byte. Larger values are written out as a length-prefixed big-endian// byte string, where the length prefix is encoded as its complement.// For example, bytes 0, 1, and 2 directly represent the integer// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-,// 2-, and 3-byte big-endian string follow.//// Encoding for signed types use the same general approach as for// unsigned types, except small values use zig-zag encoding and the// bottom bit of length prefix byte for large values is reserved as a// sign bit.//// The exact boundary between small and large encodings varies// according to the maximum number of bytes needed to encode a value// of type typ. As a special case, 8-bit types are always encoded as a// single byte.//// TODO(mdempsky): Is this level of complexity really worthwhile?func ( *exportWriter) ( *big.Int, types.Type) { , := .Underlying().(*types.Basic)if ! {panic(internalErrorf("unexpected type %v (%T)", .Underlying(), .Underlying())) } , := intSize() := .Sign() < 0if ! 
&& {panic(internalErrorf("negative unsigned integer; type %v, value %v", , )) } := .Bytes()iflen() > 0 && [0] == 0 {panic(internalErrorf("leading zeros")) }ifuint(len()) > {panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(), , , )) } := 256 - if { = 256 - 2* }if == 1 { = 256 }// Check if x can use small value encoding.iflen() <= 1 {varuintiflen() == 1 { = uint([0]) }if { <<= 1if { -- } }if < { .data.WriteByte(byte())return } } := 256 - uint(len())if { = 256 - 2*uint(len())if { |= 1 } }if < || >= 256 {panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(), , , )) } .data.WriteByte(byte()) .data.Write()}// mpfloat exports a multi-precision floating point number.//// The number's value is decomposed into mantissa × 2**exponent, where// mantissa is an integer. The value is written out as mantissa (as a// multi-precision integer) and then the exponent, except exponent is// omitted if mantissa is zero.func ( *exportWriter) ( *big.Float, types.Type) {if .IsInf() {panic("infinite constant") }// Break into f = mant × 2**exp, with 0.5 <= mant < 1.varbig.Float := int64(.MantExp(&))// Scale so that mant is an integer. := .MinPrec() .SetMantExp(&, int()) -= int64() , := .Int(nil)if != big.Exact {panic(internalErrorf("mantissa scaling failed for %f (%s)", , )) } .mpint(, )if .Sign() != 0 { .int64() }}func ( *exportWriter) ( bool) bool {varuint64if { = 1 } .uint64()return}func ( *exportWriter) ( int64) { .data.int64() }func ( *exportWriter) ( uint64) { .data.uint64() }func ( *exportWriter) ( string) { .uint64(.p.stringOff()) }func ( *exportWriter) ( types.Object) {// Anonymous parameters.if == nil { .string("")return } := .Name()if == "_" { .string("_")return } .string()}typeintWriterstruct {bytes.Buffer}func ( *intWriter) ( int64) {var [binary.MaxVarintLen64]byte := binary.PutVarint([:], ) .Write([:])}func ( *intWriter) ( uint64) {var [binary.MaxVarintLen64]byte := binary.PutUvarint([:], ) .Write([:])}func ( bool) {if ! {panic("internal error: assertion failed") }}// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.// objQueue is a FIFO queue of types.Object. The zero value of objQueue is// a ready-to-use empty queue.typeobjQueuestruct {ring []types.Objecthead, tailint}// empty returns true if q contains no Nodes.func ( *objQueue) () bool {return .head == .tail}// pushTail appends n to the tail of the queue.func ( *objQueue) ( types.Object) {iflen(.ring) == 0 { .ring = make([]types.Object, 16) } elseif .head+len(.ring) == .tail {// Grow the ring. := make([]types.Object, len(.ring)*2)// Copy the old elements. := .ring[.head%len(.ring):]if .tail-.head <= len() { = [:.tail-.head]copy(, ) } else { := copy(, )copy([:], .ring[:.tail%len(.ring)]) } .ring, .head, .tail = , 0, .tail-.head } .ring[.tail%len(.ring)] = .tail++}// popHead pops a node from the head of the queue. 
// It panics if q is empty.
func (q *objQueue) popHead() types.Object {
	if q.empty() {
		panic("dequeue empty")
	}
	obj := q.ring[q.head%len(q.ring)]
	q.head++
	return obj
}

// internalError represents an error generated inside this package.
type internalError string

func (e internalError) Error() string { return "gcimporter: " + string(e) }

// TODO(adonovan): make this call panic, so that it's symmetric with errorf.
// Otherwise it's easy to forget to do anything with the error.
//
// TODO(adonovan): also, consider switching the names "errorf" and
// "internalErrorf" as the former is used for bugs, whose cause is
// internal inconsistency, whereas the latter is used for ordinary
// situations like bad input, whose cause is external.
func internalErrorf(format string, args ...any) error {
	return internalError(fmt.Sprintf(format, args...))
}
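// The objQueue above is a growable ring buffer; iexportCommon fills it via
// pushDecl and drains it with popHead until no declarations remain. A minimal
// illustration of its FIFO behavior, where a and b are hypothetical values
// implementing types.Object:
//
//	var q objQueue
//	q.pushTail(a)
//	q.pushTail(b)
//	_ = q.popHead() // returns a
//	_ = q.popHead() // returns b
//	_ = q.empty()   // true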