// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Indexed binary package export.
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
// see that file for specification of the format.

package gcimporter

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"go/constant"
	"go/token"
	"go/types"
	"io"
	"math/big"
	"reflect"
	"sort"
	"strconv"
	"strings"

	"golang.org/x/tools/internal/tokeninternal"
	"golang.org/x/tools/internal/typeparams"
)

// IExportShallow encodes "shallow" export data for the specified package.
//
// No promises are made about the encoding other than that it can be
// decoded by the same version of IIExportShallow. If you plan to save
// export data in the file system, be sure to include a cryptographic
// digest of the executable in the key to avoid version skew.
func ( *token.FileSet,  *types.Package) ([]byte, error) {
	// In principle this operation can only fail if out.Write fails,
	// but that's impossible for bytes.Buffer---and as a matter of
	// fact iexportCommon doesn't even check for I/O errors.
	// TODO(adonovan): handle I/O errors properly.
	// TODO(adonovan): use byte slices throughout, avoiding copying.
	const ,  = false, true
	var  bytes.Buffer
	 := iexportCommon(&, , , , iexportVersion, []*types.Package{})
	return .Bytes(), 
}

// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow
// in the same executable. This function cannot import data from
// cmd/compile or gcexportdata.Write.
func ( *token.FileSet,  map[string]*types.Package,  []byte,  string,  InsertType) (*types.Package, error) {
	const  = false
	,  := iimportCommon(, , , , , )
	if  != nil {
		return nil, 
	}
	return [0], nil
}

// InsertType is the type of a function that creates a types.TypeName
// object for a named type and inserts it into the scope of the
// specified Package. It is used by IImportShallow to lazily
// materialize type declarations on demand.
type InsertType = func(pkg *types.Package, name string)

// Current bundled export format version. Increase with each format change.
// 0: initial implementation
//
// This version number is written as the first word of a bundle header
// (see iexportCommon) and is distinct from iexportVersion.
const bundleVersion = 0

// IExportData writes indexed export data for pkg to out.
//
// If no file set is provided, position info will be missing.
// The package path of the top-level package will not be recorded,
// so that calls to IImportData can override with a provided package path.
func ( io.Writer,  *token.FileSet,  *types.Package) error {
	const ,  = false, false
	return iexportCommon(, , , , iexportVersion, []*types.Package{})
}

// IExportBundle writes an indexed export bundle for pkgs to out.
func ( io.Writer,  *token.FileSet,  []*types.Package) error {
	const ,  = true, false
	return iexportCommon(, , , , iexportVersion, )
}

func ( io.Writer,  *token.FileSet, ,  bool,  int,  []*types.Package) ( error) {
	if !debug {
		defer func() {
			if  := recover();  != nil {
				if ,  := .(internalError);  {
					 = 
					return
				}
				// Not an internal error; panic again.
				panic()
			}
		}()
	}

	 := iexporter{
		fset:        ,
		version:     ,
		shallow:     ,
		allPkgs:     map[*types.Package]bool{},
		stringIndex: map[string]uint64{},
		declIndex:   map[types.Object]uint64{},
		tparamNames: map[types.Object]string{},
		typIndex:    map[types.Type]uint64{},
	}
	if ! {
		.localpkg = [0]
	}

	for ,  := range predeclared() {
		.typIndex[] = uint64()
	}
	if len(.typIndex) > predeclReserved {
		panic(internalErrorf("too many predeclared types: %d > %d", len(.typIndex), predeclReserved))
	}

	// Initialize work queue with exported declarations.
	for ,  := range  {
		 := .Scope()
		for ,  := range .Names() {
			if token.IsExported() {
				.pushDecl(.Lookup())
			}
		}

		if  {
			// Ensure pkg and its imports are included in the index.
			.allPkgs[] = true
			for ,  := range .Imports() {
				.allPkgs[] = true
			}
		}
	}

	// Loop until no more work.
	for !.declTodo.empty() {
		.doDecl(.declTodo.popHead())
	}

	// Produce index of offset of each file record in files.
	var  intWriter
	var  []uint64 // fileOffset[i] is offset in files of file encoded as i
	if .shallow {
		 = make([]uint64, len(.fileInfos))
		for ,  := range .fileInfos {
			[] = uint64(.Len())
			.encodeFile(&, .file, .needed)
		}
	}

	// Append indices to data0 section.
	 := uint64(.data0.Len())
	 := .newWriter()
	.writeIndex(.declIndex)

	if  {
		.uint64(uint64(len()))
		for ,  := range  {
			.pkg()
			 := .Imports()
			.uint64(uint64(len()))
			for ,  := range  {
				.pkg()
			}
		}
	}
	.flush()

	// Assemble header.
	var  intWriter
	if  {
		.uint64(bundleVersion)
	}
	.uint64(uint64(.version))
	.uint64(uint64(.strings.Len()))
	if .shallow {
		.uint64(uint64(.Len()))
		.uint64(uint64(len()))
		for ,  := range  {
			.uint64()
		}
	}
	.uint64()

	// Flush output.
	io.Copy(, &)
	io.Copy(, &.strings)
	if .shallow {
		io.Copy(, &)
	}
	io.Copy(, &.data0)

	return nil
}

// encodeFile writes to w a representation of the file sufficient to
// faithfully restore position information about all needed offsets.
// Mutates the needed array.
func ( *iexporter) ( *intWriter,  *token.File,  []uint64) {
	_ = [0] // precondition: needed is non-empty

	.uint64(.stringOff(.Name()))

	 := uint64(.Size())
	.uint64()

	// Sort the set of needed offsets. Duplicates are harmless.
	sort.Slice(, func(,  int) bool { return [] < [] })

	 := tokeninternal.GetLines() // byte offset of each line start
	.uint64(uint64(len()))

	// Rather than record the entire array of line start offsets,
	// we save only a sparse list of (index, offset) pairs for
	// the start of each line that contains a needed position.
	var  [][2]int // (index, offset) pairs
:
	for ,  := range  {
		 := 
		if  < len()-1 {
			 = uint64([+1])
		}
		// Does this line contains a needed offset?
		if [0] <  {
			 = append(, [2]int{, })
			for [0] <  {
				 = [1:]
				if len() == 0 {
					break 
				}
			}
		}
	}

	// Delta-encode the columns.
	.uint64(uint64(len()))
	var  [2]int
	for ,  := range  {
		.uint64(uint64([0] - [0]))
		.uint64(uint64([1] - [1]))
		 = 
	}
}

// writeIndex writes out an object index. mainIndex indicates whether
// we're writing out the main index, which is also read by
// non-compiler tools and includes a complete package description
// (i.e., name and height).
func ( *exportWriter) ( map[types.Object]uint64) {
	type  struct {
		  types.Object
		 string // qualified name; differs from obj.Name for type params
	}
	// Build a map from packages to objects from that package.
	 := map[*types.Package][]{}

	// For the main index, make sure to include every package that
	// we reference, even if we're not exporting (or reexporting)
	// any symbols from it.
	if .p.localpkg != nil {
		[.p.localpkg] = nil
	}
	for  := range .p.allPkgs {
		[] = nil
	}

	for  := range  {
		 := .p.exportName()
		[.Pkg()] = append([.Pkg()], {, })
	}

	var  []*types.Package
	for ,  := range  {
		 = append(, )

		sort.Slice(, func(,  int) bool {
			return []. < [].
		})
	}

	sort.Slice(, func(,  int) bool {
		return .exportPath([]) < .exportPath([])
	})

	.uint64(uint64(len()))
	for ,  := range  {
		.string(.exportPath())
		.string(.Name())
		.uint64(uint64(0)) // package height is not needed for go/types

		 := []
		.uint64(uint64(len()))
		for ,  := range  {
			.string(.)
			.uint64([.])
		}
	}
}

// exportName returns the 'exported' name of an object. It differs from
// obj.Name() only for type parameters (see tparamExportName for details).
func ( *iexporter) ( types.Object) ( string) {
	if  := .tparamNames[];  != "" {
		return 
	}
	return .Name()
}

// An iexporter holds the transient state of a single indexed-export
// operation: the string, file, data, and type sections being built, plus
// the indices that map objects and types to their encoded offsets.
type iexporter struct {
	fset    *token.FileSet // may be nil; then positions are omitted
	out     *bytes.Buffer
	version int // export format version being written

	shallow  bool           // don't put types from other packages in the index
	localpkg *types.Package // (nil in bundle mode)

	// allPkgs tracks all packages that have been referenced by
	// the export data, so we can ensure to include them in the
	// main index.
	allPkgs map[*types.Package]bool

	declTodo objQueue // FIFO of objects whose declarations remain to be written

	strings     intWriter         // string section in progress
	stringIndex map[string]uint64 // offset of each string already written

	// In shallow mode, object positions are encoded as (file, offset).
	// Each file is recorded as a line-number table.
	// Only the lines of needed positions are saved faithfully.
	fileInfo  map[*token.File]uint64 // value is index in fileInfos
	fileInfos []*filePositions

	data0       intWriter               // declaration/type data section in progress
	declIndex   map[types.Object]uint64 // offset of each written declaration
	tparamNames map[types.Object]string // typeparam->exported name
	typIndex    map[types.Type]uint64   // offset of each written (or predeclared) type

	indent int // for tracing support
}

// filePositions records, for one token.File, the set of byte offsets
// whose positions must be restorable from shallow export data.
type filePositions struct {
	file   *token.File
	needed []uint64 // unordered list of needed file offsets
}

func ( *iexporter) ( string,  ...interface{}) {
	if !trace {
		// Call sites should also be guarded, but having this check here allows
		// easily enabling/disabling debug trace statements.
		return
	}
	fmt.Printf(strings.Repeat("..", .indent)++"\n", ...)
}

// stringOff returns the offset of s within the string section.
// If not already present, it's added to the end.
func ( *iexporter) ( string) uint64 {
	,  := .stringIndex[]
	if ! {
		 = uint64(.strings.Len())
		.stringIndex[] = 

		.strings.uint64(uint64(len()))
		.strings.WriteString()
	}
	return 
}

// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it.
func ( *iexporter) ( *token.File,  token.Pos) (uint64, uint64) {
	,  := .fileInfo[]
	if ! {
		 = uint64(len(.fileInfo))
		.fileInfos = append(.fileInfos, &filePositions{file: })
		if .fileInfo == nil {
			.fileInfo = make(map[*token.File]uint64)
		}
		.fileInfo[] = 
	}
	// Record each needed offset.
	 := .fileInfos[]
	 := uint64(.Offset())
	.needed = append(.needed, )

	return , 
}

// pushDecl adds n to the declaration work queue, if not already present.
func ( *iexporter) ( types.Object) {
	// Package unsafe is known to the compiler and predeclared.
	// Caller should not ask us to do export it.
	if .Pkg() == types.Unsafe {
		panic("cannot export package unsafe")
	}

	// Shallow export data: don't index decls from other packages.
	if .shallow && .Pkg() != .localpkg {
		return
	}

	if ,  := .declIndex[];  {
		return
	}

	.declIndex[] = ^uint64(0) // mark obj present in work queue
	.declTodo.pushTail()
}

// exportWriter handles writing out individual data section chunks.
// Each chunk is buffered in data and appended to the exporter's data0
// section by flush; prevFile/prevLine/prevColumn support the delta
// position encodings (posV0/posV1), which reset per chunk.
type exportWriter struct {
	p *iexporter // parent exporter

	data       intWriter      // buffered chunk contents
	currPkg    *types.Package // qualifying package for unexported names
	prevFile   string         // state for delta-encoded positions
	prevLine   int64
	prevColumn int64
}

func ( *exportWriter) ( *types.Package) string {
	if  == .p.localpkg {
		return ""
	}
	return .Path()
}

func ( *iexporter) ( types.Object) {
	if trace {
		.trace("exporting decl %v (%T)", , )
		.indent++
		defer func() {
			.indent--
			.trace("=> %s", )
		}()
	}
	 := .newWriter()
	.setPkg(.Pkg(), false)

	switch obj := .(type) {
	case *types.Var:
		.tag('V')
		.pos(.Pos())
		.typ(.Type(), .Pkg())

	case *types.Func:
		,  := .Type().(*types.Signature)
		if .Recv() != nil {
			// We shouldn't see methods in the package scope,
			// but the type checker may repair "func () F() {}"
			// to "func (Invalid) F()" and then treat it like "func F()",
			// so allow that. See golang/go#57729.
			if .Recv().Type() != types.Typ[types.Invalid] {
				panic(internalErrorf("unexpected method: %v", ))
			}
		}

		// Function.
		if typeparams.ForSignature().Len() == 0 {
			.tag('F')
		} else {
			.tag('G')
		}
		.pos(.Pos())
		// The tparam list of the function type is the declaration of the type
		// params. So, write out the type params right now. Then those type params
		// will be referenced via their type offset (via typOff) in all other
		// places in the signature and function where they are used.
		//
		// While importing the type parameters, tparamList computes and records
		// their export name, so that it can be later used when writing the index.
		if  := typeparams.ForSignature(); .Len() > 0 {
			.tparamList(.Name(), , .Pkg())
		}
		.signature()

	case *types.Const:
		.tag('C')
		.pos(.Pos())
		.value(.Type(), .Val())

	case *types.TypeName:
		 := .Type()

		if ,  := .(*typeparams.TypeParam);  {
			.tag('P')
			.pos(.Pos())
			 := .Constraint()
			if .version >= iexportVersionGo1_18 {
				 := false
				if ,  := .(*types.Interface);  != nil {
					 = typeparams.IsImplicit()
				}
				.bool()
			}
			.typ(, .Pkg())
			break
		}

		if .IsAlias() {
			.tag('A')
			.pos(.Pos())
			.typ(, .Pkg())
			break
		}

		// Defined type.
		,  := .(*types.Named)
		if ! {
			panic(internalErrorf("%s is not a defined type", ))
		}

		if typeparams.ForNamed().Len() == 0 {
			.tag('T')
		} else {
			.tag('U')
		}
		.pos(.Pos())

		if typeparams.ForNamed().Len() > 0 {
			// While importing the type parameters, tparamList computes and records
			// their export name, so that it can be later used when writing the index.
			.tparamList(.Name(), typeparams.ForNamed(), .Pkg())
		}

		 := .Type().Underlying()
		.typ(, .Pkg())

		if types.IsInterface() {
			break
		}

		 := .NumMethods()
		.uint64(uint64())
		for  := 0;  < ; ++ {
			 := .Method()
			.pos(.Pos())
			.string(.Name())
			,  := .Type().(*types.Signature)

			// Receiver type parameters are type arguments of the receiver type, so
			// their name must be qualified before exporting recv.
			if  := typeparams.RecvTypeParams(); .Len() > 0 {
				 := .Name() + "." + .Name()
				for  := 0;  < .Len(); ++ {
					 := .At()
					 := tparamExportName(, )
					.p.tparamNames[.Obj()] = 
				}
			}
			.param(.Recv())
			.signature()
		}

	default:
		panic(internalErrorf("unexpected object: %v", ))
	}

	.declIndex[] = .flush()
}

func ( *exportWriter) ( byte) {
	.data.WriteByte()
}

func ( *exportWriter) ( token.Pos) {
	if .p.shallow {
		.posV2()
	} else if .p.version >= iexportVersionPosCol {
		.posV1()
	} else {
		.posV0()
	}
}

// posV2 encoding (used only in shallow mode) records positions as
// (file, offset), where file is the index in the token.File table
// (which records the file name and newline offsets) and offset is a
// byte offset. It effectively ignores //line directives.
func ( *exportWriter) ( token.Pos) {
	if  == token.NoPos {
		.uint64(0)
		return
	}
	 := .p.fset.File() // fset must be non-nil
	,  := .p.fileIndexAndOffset(, )
	.uint64(1 + )
	.uint64()
}

func ( *exportWriter) ( token.Pos) {
	if .p.fset == nil {
		.int64(0)
		return
	}

	 := .p.fset.Position()
	 := .Filename
	 := int64(.Line)
	 := int64(.Column)

	 := ( - .prevColumn) << 1
	 := ( - .prevLine) << 1

	if  != .prevFile {
		 |= 1
	}
	if  != 0 {
		 |= 1
	}

	.int64()
	if &1 != 0 {
		.int64()
		if &1 != 0 {
			.string()
		}
	}

	.prevFile = 
	.prevLine = 
	.prevColumn = 
}

func ( *exportWriter) ( token.Pos) {
	if .p.fset == nil {
		.int64(0)
		return
	}

	 := .p.fset.Position()
	 := .Filename
	 := int64(.Line)

	// When file is the same as the last position (common case),
	// we can save a few bytes by delta encoding just the line
	// number.
	//
	// Note: Because data objects may be read out of order (or not
	// at all), we can only apply delta encoding within a single
	// object. This is handled implicitly by tracking prevFile and
	// prevLine as fields of exportWriter.

	if  == .prevFile {
		 :=  - .prevLine
		.int64()
		if  == deltaNewFile {
			.int64(-1)
		}
	} else {
		.int64(deltaNewFile)
		.int64() // line >= 0
		.string()
		.prevFile = 
	}
	.prevLine = 
}

func ( *exportWriter) ( *types.Package) {
	// Ensure any referenced packages are declared in the main index.
	.p.allPkgs[] = true

	.string(.exportPath())
}

func ( *exportWriter) ( *types.TypeName) {
	 := .p.exportName()

	// Ensure any referenced declarations are written out too.
	.p.pushDecl()
	.string()
	.pkg(.Pkg())
}

func ( *exportWriter) ( types.Type,  *types.Package) {
	.data.uint64(.p.typOff(, ))
}

func ( *iexporter) () *exportWriter {
	return &exportWriter{p: }
}

func ( *exportWriter) () uint64 {
	 := uint64(.p.data0.Len())
	io.Copy(&.p.data0, &.data)
	return 
}

func ( *iexporter) ( types.Type,  *types.Package) uint64 {
	,  := .typIndex[]
	if ! {
		 := .newWriter()
		.doTyp(, )
		 = predeclReserved + .flush()
		.typIndex[] = 
	}
	return 
}

func ( *exportWriter) ( itag) {
	.data.uint64(uint64())
}

func ( *exportWriter) ( types.Type,  *types.Package) {
	if trace {
		.p.trace("exporting type %s (%T)", , )
		.p.indent++
		defer func() {
			.p.indent--
			.p.trace("=> %s", )
		}()
	}
	switch t := .(type) {
	case *types.Named:
		if  := typeparams.NamedTypeArgs(); .Len() > 0 {
			.startType(instanceType)
			// TODO(rfindley): investigate if this position is correct, and if it
			// matters.
			.pos(.Obj().Pos())
			.typeList(, )
			.typ(typeparams.NamedTypeOrigin(), )
			return
		}
		.startType(definedType)
		.qualifiedType(.Obj())

	case *typeparams.TypeParam:
		.startType(typeParamType)
		.qualifiedType(.Obj())

	case *types.Pointer:
		.startType(pointerType)
		.typ(.Elem(), )

	case *types.Slice:
		.startType(sliceType)
		.typ(.Elem(), )

	case *types.Array:
		.startType(arrayType)
		.uint64(uint64(.Len()))
		.typ(.Elem(), )

	case *types.Chan:
		.startType(chanType)
		// 1 RecvOnly; 2 SendOnly; 3 SendRecv
		var  uint64
		switch .Dir() {
		case types.RecvOnly:
			 = 1
		case types.SendOnly:
			 = 2
		case types.SendRecv:
			 = 3
		}
		.uint64()
		.typ(.Elem(), )

	case *types.Map:
		.startType(mapType)
		.typ(.Key(), )
		.typ(.Elem(), )

	case *types.Signature:
		.startType(signatureType)
		.setPkg(, true)
		.signature()

	case *types.Struct:
		.startType(structType)
		 := .NumFields()
		if  > 0 {
			.setPkg(.Field(0).Pkg(), true) // qualifying package for field objects
		} else {
			.setPkg(, true)
		}
		.uint64(uint64())
		for  := 0;  < ; ++ {
			 := .Field()
			.pos(.Pos())
			.string(.Name()) // unexported fields implicitly qualified by prior setPkg
			.typ(.Type(), )
			.bool(.Anonymous())
			.string(.Tag()) // note (or tag)
		}

	case *types.Interface:
		.startType(interfaceType)
		.setPkg(, true)

		 := .NumEmbeddeds()
		.uint64(uint64())
		for  := 0;  < ; ++ {
			 := .EmbeddedType()
			 := 
			if ,  := .(*types.Named);  != nil {
				.pos(.Obj().Pos())
			} else {
				.pos(token.NoPos)
			}
			.typ(, )
		}

		 = .NumExplicitMethods()
		.uint64(uint64())
		for  := 0;  < ; ++ {
			 := .ExplicitMethod()
			.pos(.Pos())
			.string(.Name())
			,  := .Type().(*types.Signature)
			.signature()
		}

	case *typeparams.Union:
		.startType(unionType)
		 := .Len()
		.uint64(uint64())
		for  := 0;  < ; ++ {
			 := .Term()
			.bool(.Tilde())
			.typ(.Type(), )
		}

	default:
		panic(internalErrorf("unexpected type: %v, %v", , reflect.TypeOf()))
	}
}

func ( *exportWriter) ( *types.Package,  bool) {
	if  {
		.pkg()
	}

	.currPkg = 
}

func ( *exportWriter) ( *types.Signature) {
	.paramList(.Params())
	.paramList(.Results())
	if .Params().Len() > 0 {
		.bool(.Variadic())
	}
}

func ( *exportWriter) ( *typeparams.TypeList,  *types.Package) {
	.uint64(uint64(.Len()))
	for  := 0;  < .Len(); ++ {
		.typ(.At(), )
	}
}

func ( *exportWriter) ( string,  *typeparams.TypeParamList,  *types.Package) {
	 := uint64(.Len())
	.uint64()
	for  := 0;  < .Len(); ++ {
		 := .At()
		// Set the type parameter exportName before exporting its type.
		 := tparamExportName(, )
		.p.tparamNames[.Obj()] = 
		.typ(.At(), )
	}
}

const blankMarker = "$"

// tparamExportName returns the 'exported' name of a type parameter, which
// differs from its actual object name: it is prefixed with a qualifier, and
// blank type parameter names are disambiguated by their index in the type
// parameter list.
func ( string,  *typeparams.TypeParam) string {
	assert( != "")
	 := .Obj().Name()
	if  == "_" {
		 = blankMarker + strconv.Itoa(.Index())
	}
	return  + "." + 
}

// tparamName returns the real name of a type parameter, after stripping its
// qualifying prefix and reverting blank-name encoding. See tparamExportName
// for details.
func ( string) string {
	// Remove the "path" from the type param name that makes it unique.
	 := strings.LastIndex(, ".")
	if  < 0 {
		errorf("malformed type parameter export name %s: missing prefix", )
	}
	 := [+1:]
	if strings.HasPrefix(, blankMarker) {
		return "_"
	}
	return 
}

func ( *exportWriter) ( *types.Tuple) {
	 := .Len()
	.uint64(uint64())
	for  := 0;  < ; ++ {
		.param(.At())
	}
}

func ( *exportWriter) ( types.Object) {
	.pos(.Pos())
	.localIdent()
	.typ(.Type(), .Pkg())
}

func ( *exportWriter) ( types.Type,  constant.Value) {
	.typ(, nil)
	if .p.version >= iexportVersionGo1_18 {
		.int64(int64(.Kind()))
	}

	switch  := .Underlying().(*types.Basic); .Info() & types.IsConstType {
	case types.IsBoolean:
		.bool(constant.BoolVal())
	case types.IsInteger:
		var  big.Int
		if ,  := constant.Int64Val();  {
			.SetInt64()
		} else if ,  := constant.Uint64Val();  {
			.SetUint64()
		} else {
			.SetString(.ExactString(), 10)
		}
		.mpint(&, )
	case types.IsFloat:
		 := constantToFloat()
		.mpfloat(, )
	case types.IsComplex:
		.mpfloat(constantToFloat(constant.Real()), )
		.mpfloat(constantToFloat(constant.Imag()), )
	case types.IsString:
		.string(constant.StringVal())
	default:
		if .Kind() == types.Invalid {
			// package contains type errors
			break
		}
		panic(internalErrorf("unexpected type %v (%v)", , .Underlying()))
	}
}

// constantToFloat converts a constant.Value with kind constant.Float to a
// big.Float.
func ( constant.Value) *big.Float {
	 = constant.ToFloat()
	// Use the same floating-point precision (512) as cmd/compile
	// (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
	const  = 512
	var  big.Float
	.SetPrec()
	if ,  := constant.Float64Val();  {
		// float64
		.SetFloat64()
	} else if ,  := constant.Num(), constant.Denom(); .Kind() == constant.Int {
		// TODO(gri): add big.Rat accessor to constant.Value.
		 := valueToRat()
		 := valueToRat()
		.SetRat(.Quo(, ))
	} else {
		// Value too large to represent as a fraction => inaccessible.
		// TODO(gri): add big.Float accessor to constant.Value.
		,  := .SetString(.ExactString())
		assert()
	}
	return &
}

// mpint exports a multi-precision integer.
//
// For unsigned types, small values are written out as a single
// byte. Larger values are written out as a length-prefixed big-endian
// byte string, where the length prefix is encoded as its complement.
// For example, bytes 0, 1, and 2 directly represent the integer
// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-,
// 2-, and 3-byte big-endian string follow.
//
// Encoding for signed types use the same general approach as for
// unsigned types, except small values use zig-zag encoding and the
// bottom bit of length prefix byte for large values is reserved as a
// sign bit.
//
// The exact boundary between small and large encodings varies
// according to the maximum number of bytes needed to encode a value
// of type typ. As a special case, 8-bit types are always encoded as a
// single byte.
//
// TODO(mdempsky): Is this level of complexity really worthwhile?
func ( *exportWriter) ( *big.Int,  types.Type) {
	,  := .Underlying().(*types.Basic)
	if ! {
		panic(internalErrorf("unexpected type %v (%T)", .Underlying(), .Underlying()))
	}

	,  := intSize()

	 := .Sign() < 0
	if ! &&  {
		panic(internalErrorf("negative unsigned integer; type %v, value %v", , ))
	}

	 := .Bytes()
	if len() > 0 && [0] == 0 {
		panic(internalErrorf("leading zeros"))
	}
	if uint(len()) >  {
		panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(), , , ))
	}

	 := 256 - 
	if  {
		 = 256 - 2*
	}
	if  == 1 {
		 = 256
	}

	// Check if x can use small value encoding.
	if len() <= 1 {
		var  uint
		if len() == 1 {
			 = uint([0])
		}
		if  {
			 <<= 1
			if  {
				--
			}
		}
		if  <  {
			.data.WriteByte(byte())
			return
		}
	}

	 := 256 - uint(len())
	if  {
		 = 256 - 2*uint(len())
		if  {
			 |= 1
		}
	}
	if  <  ||  >= 256 {
		panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(), , , ))
	}

	.data.WriteByte(byte())
	.data.Write()
}

// mpfloat exports a multi-precision floating point number.
//
// The number's value is decomposed into mantissa × 2**exponent, where
// mantissa is an integer. The value is written out as mantissa (as a
// multi-precision integer) and then the exponent, except exponent is
// omitted if mantissa is zero.
func ( *exportWriter) ( *big.Float,  types.Type) {
	if .IsInf() {
		panic("infinite constant")
	}

	// Break into f = mant × 2**exp, with 0.5 <= mant < 1.
	var  big.Float
	 := int64(.MantExp(&))

	// Scale so that mant is an integer.
	 := .MinPrec()
	.SetMantExp(&, int())
	 -= int64()

	,  := .Int(nil)
	if  != big.Exact {
		panic(internalErrorf("mantissa scaling failed for %f (%s)", , ))
	}
	.mpint(, )
	if .Sign() != 0 {
		.int64()
	}
}

func ( *exportWriter) ( bool) bool {
	var  uint64
	if  {
		 = 1
	}
	.uint64()
	return 
}

func ( *exportWriter) ( int64)   { .data.int64() }
func ( *exportWriter) ( uint64) { .data.uint64() }
func ( *exportWriter) ( string) { .uint64(.p.stringOff()) }

func ( *exportWriter) ( types.Object) {
	// Anonymous parameters.
	if  == nil {
		.string("")
		return
	}

	 := .Name()
	if  == "_" {
		.string("_")
		return
	}

	.string()
}

// intWriter is a bytes.Buffer that can also write varint-encoded
// integers (the primitive encoding used throughout the export format).
type intWriter struct {
	bytes.Buffer
}

// int64 appends x in zig-zag varint encoding.
func (w *intWriter) int64(x int64) {
	var buf [binary.MaxVarintLen64]byte
	n := binary.PutVarint(buf[:], x)
	w.Write(buf[:n])
}

// uint64 appends x in unsigned varint encoding.
func (w *intWriter) uint64(x uint64) {
	var buf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(buf[:], x)
	w.Write(buf[:n])
}

// assert panics if cond is false; it marks internal invariants.
func assert(cond bool) {
	if !cond {
		panic("internal error: assertion failed")
	}
}

// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.

// objQueue is a FIFO queue of types.Object. The zero value of objQueue is
// a ready-to-use empty queue. It is implemented as a growable ring buffer:
// head and tail are monotonically increasing counters, and element i lives
// at ring[i%len(ring)].
type objQueue struct {
	ring       []types.Object
	head, tail int
}

// empty returns true if q contains no Nodes.
func (q *objQueue) empty() bool {
	return q.head == q.tail
}

// pushTail appends n to the tail of the queue.
func (q *objQueue) pushTail(obj types.Object) {
	if len(q.ring) == 0 {
		q.ring = make([]types.Object, 16)
	} else if q.head+len(q.ring) == q.tail {
		// Grow the ring.
		nring := make([]types.Object, len(q.ring)*2)
		// Copy the old elements.
		part := q.ring[q.head%len(q.ring):]
		if q.tail-q.head <= len(part) {
			part = part[:q.tail-q.head]
			copy(nring, part)
		} else {
			pos := copy(nring, part)
			copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
		}
		q.ring, q.head, q.tail = nring, 0, q.tail-q.head
	}

	q.ring[q.tail%len(q.ring)] = obj
	q.tail++
}

// popHead pops a node from the head of the queue. It panics if q is empty.
func (q *objQueue) popHead() types.Object {
	if q.empty() {
		panic("dequeue empty")
	}
	obj := q.ring[q.head%len(q.ring)]
	q.head++
	return obj
}