// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package parser implements a parser for Go source files. Input may be
// provided in a variety of forms (see the various Parse* functions); the
// output is an abstract syntax tree (AST) representing the Go source. The
// parser is invoked through one of the Parse* functions.
//
// The parser accepts a larger language than is syntactically permitted by
// the Go spec, for simplicity, and for improved robustness in the presence
// of syntax errors. For instance, in method declarations, the receiver is
// treated like an ordinary parameter list and thus may contain multiple
// entries where the spec permits exactly one. Consequently, the corresponding
// field in the AST (ast.FuncDecl.Recv) is not restricted to one entry.
//
package parser

import (
	"fmt"
	"go/ast"
	"go/internal/typeparams"
	"go/scanner"
	"go/token"
	"strconv"
	"strings"
	"unicode"
)

// The parser structure holds the parser's internal state.
type parser struct {
	file    *token.File
	errors  scanner.ErrorList
	scanner scanner.Scanner

	// Tracing/debugging
	mode   Mode // parsing mode
	trace  bool // == (mode&Trace != 0)
	indent int  // indentation used for tracing output

	// Comments
	comments    []*ast.CommentGroup
	leadComment *ast.CommentGroup // last lead comment
	lineComment *ast.CommentGroup // last line comment

	// Next token
	pos token.Pos   // token position
	tok token.Token // one token look-ahead
	lit string      // token literal

	// Error recovery
	// (used to limit the number of calls to parser.advance
	// w/o making scanning progress - avoids potential endless
	// loops across multiple parser functions during error recovery)
	syncPos token.Pos // last synchronization position
	syncCnt int       // number of parser.advance calls without progress

	// Non-syntactic parser control
	exprLev int  // < 0: in control clause, >= 0: in expression
	inRhs   bool // if set, the parser is parsing a rhs expression

	imports []*ast.ImportSpec // list of imports
}

// init prepares the parser to scan src, attributed to filename within fset.
func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
	p.file = fset.AddFile(filename, -1, len(src))
	var m scanner.Mode
	if mode&ParseComments != 0 {
		m = scanner.ScanComments
	}
	eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
	p.scanner.Init(p.file, src, eh, m)

	p.mode = mode
	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
	p.next()
}

func (p *parser) allowGenerics() bool { return p.mode&typeparams.DisallowParsing == 0 }
func (p *parser) allowTypeSets() bool { return p.mode&typeparams.DisallowTypeSets == 0 }

// ----------------------------------------------------------------------------
// Parsing support

func (p *parser) printTrace(a ...any) {
	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = len(dots)
	pos := p.file.Position(p.pos)
	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
	i := 2 * p.indent
	for i > n {
		fmt.Print(dots)
		i -= n
	}
	// i <= n
	fmt.Print(dots[0:i])
	fmt.Println(a...)
}

func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}

// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
	p.indent--
	p.printTrace(")")
}

// Advance to the next token.
func (p *parser) next0() {
	// Because of one-token look-ahead, print the previous token
	// when tracing as it provides a more readable output. The
	// very first token (!p.pos.IsValid()) is not initialized
	// (it is token.ILLEGAL), so don't print it.
	if p.trace && p.pos.IsValid() {
		s := p.tok.String()
		switch {
		case p.tok.IsLiteral():
			p.printTrace(s, p.lit)
		case p.tok.IsOperator(), p.tok.IsKeyword():
			p.printTrace("\"" + s + "\"")
		default:
			p.printTrace(s)
		}
	}

	p.pos, p.tok, p.lit = p.scanner.Scan()
}

// Consume a comment and return it and the line on which it ends.
func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
	// /*-style comments may end on a different line than where they start.
	// Scan the comment for '\n' chars and adjust endline accordingly.
	endline = p.file.Line(p.pos)
	if p.lit[1] == '*' {
		// don't use range here - no need to decode Unicode code points
		for i := 0; i < len(p.lit); i++ {
			if p.lit[i] == '\n' {
				endline++
			}
		}
	}

	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
	p.next0()

	return
}

// Consume a group of adjacent comments, add it to the parser's
// comments list, and return it together with the line at which
// the last comment in the group ends. A non-comment token or n
// empty lines terminate a comment group.
func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
	var list []*ast.Comment
	endline = p.file.Line(p.pos)
	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
		var comment *ast.Comment
		comment, endline = p.consumeComment()
		list = append(list, comment)
	}

	// add comment group to the comments list
	comments = &ast.CommentGroup{List: list}
	p.comments = append(p.comments, comments)

	return
}

// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
func (p *parser) next() {
	p.leadComment = nil
	p.lineComment = nil
	prev := p.pos
	p.next0()

	if p.tok == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int

		if p.file.Line(p.pos) == p.file.Line(prev) {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(0)
			if p.file.Line(p.pos) != endline || p.tok == token.EOF {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = comment
			}
		}

		// consume successor comments, if any
		endline = -1
		for p.tok == token.COMMENT {
			comment, endline = p.consumeCommentGroup(1)
		}

		if endline+1 == p.file.Line(p.pos) {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a lead comment.
			p.leadComment = comment
		}
	}
}

// A bailout panic is raised to indicate early termination.
type bailout struct{}

func (p *parser) error(pos token.Pos, msg string) {
	if p.trace {
		defer un(trace(p, "error: "+msg))
	}

	epos := p.file.Position(pos)

	// If AllErrors is not set, discard errors reported on the same line
	// as the last recorded error and stop parsing if there are more than
	// 10 errors.
	if p.mode&AllErrors == 0 {
		n := len(p.errors)
		if n > 0 && p.errors[n-1].Pos.Line == epos.Line {
			return // discard - likely a spurious error
		}
		if n > 10 {
			panic(bailout{})
		}
	}

	p.errors.Add(epos, msg)
}

func (p *parser) errorExpected(pos token.Pos, msg string) {
	msg = "expected " + msg
	if pos == p.pos {
		// the error happened at the current position;
		// make the error message more specific
		switch {
		case p.tok == token.SEMICOLON && p.lit == "\n":
			msg += ", found newline"
		case p.tok.IsLiteral():
			// print 123 rather than 'INT', etc.
			msg += ", found " + p.lit
		default:
			msg += ", found '" + p.tok.String() + "'"
		}
	}
	p.error(pos, msg)
}

func (p *parser) expect(tok token.Token) token.Pos {
	pos := p.pos
	if p.tok != tok {
		p.errorExpected(pos, "'"+tok.String()+"'")
	}
	p.next() // make progress
	return pos
}

// expect2 is like expect, but it returns an invalid position
// if the expected token is not found.
func (p *parser) expect2(tok token.Token) (pos token.Pos) {
	if p.tok == tok {
		pos = p.pos
	} else {
		p.errorExpected(p.pos, "'"+tok.String()+"'")
	}
	p.next() // make progress
	return
}

// expectClosing is like expect but provides a better error message
// for the common case of a missing comma before a newline.
func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
	if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
		p.error(p.pos, "missing ',' before newline in "+context)
		p.next()
	}
	return p.expect(tok)
}

func (p *parser) expectSemi() {
	// semicolon is optional before a closing ')' or '}'
	if p.tok != token.RPAREN && p.tok != token.RBRACE {
		switch p.tok {
		case token.COMMA:
			// permit a ',' instead of a ';' but complain
			p.errorExpected(p.pos, "';'")
			fallthrough
		case token.SEMICOLON:
			p.next()
		default:
			p.errorExpected(p.pos, "';'")
			p.advance(stmtStart)
		}
	}
}

func (p *parser) atComma(context string, follow token.Token) bool {
	if p.tok == token.COMMA {
		return true
	}
	if p.tok != follow {
		msg := "missing ','"
		if p.tok == token.SEMICOLON && p.lit == "\n" {
			msg += " before newline"
		}
		p.error(p.pos, msg+" in "+context)
		return true // "insert" comma and continue
	}
	return false
}

func assert(cond bool, msg string) {
	if !cond {
		panic("go/parser internal error: " + msg)
	}
}

// advance consumes tokens until the current token p.tok
// is in the 'to' set, or token.EOF. For error recovery.
func (p *parser) advance(to map[token.Token]bool) {
	for ; p.tok != token.EOF; p.next() {
		if to[p.tok] {
			// Return only if parser made some progress since last
			// sync or if it has not reached 10 advance calls without
			// progress. Otherwise consume at least one token to
			// avoid an endless parser loop (it is possible that
			// both parseOperand and parseStmt call advance and
			// correctly do not advance, thus the need for the
			// invocation limit p.syncCnt).
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.pos > p.syncPos {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
			// Reaching here indicates a parser bug, likely an
			// incorrect token list in this function, but it only
			// leads to skipping of possibly correct code if a
			// previous error is present, and thus is preferred
			// over a non-terminating parse.
		}
	}
}

var stmtStart = map[token.Token]bool{
	token.BREAK:       true,
	token.CONST:       true,
	token.CONTINUE:    true,
	token.DEFER:       true,
	token.FALLTHROUGH: true,
	token.FOR:         true,
	token.GO:          true,
	token.GOTO:        true,
	token.IF:          true,
	token.RETURN:      true,
	token.SELECT:      true,
	token.SWITCH:      true,
	token.TYPE:        true,
	token.VAR:         true,
}

var declStart = map[token.Token]bool{
	token.CONST: true,
	token.TYPE:  true,
	token.VAR:   true,
}

var exprEnd = map[token.Token]bool{
	token.COMMA:     true,
	token.COLON:     true,
	token.SEMICOLON: true,
	token.RPAREN:    true,
	token.RBRACK:    true,
	token.RBRACE:    true,
}

// safePos returns a valid file position for a given position: If pos
// is valid to begin with, safePos returns pos. If pos is out-of-range,
// safePos returns the EOF position.
//
// This is hack to work around "artificial" end positions in the AST which
// are computed by adding 1 to (presumably valid) token positions. If the
// token positions are invalid due to parse errors, the resulting end position
// may be past the file's EOF position, which would lead to panics if used
// later on.
func (p *parser) safePos(pos token.Pos) (res token.Pos) {
	defer func() {
		if recover() != nil {
			res = token.Pos(p.file.Base() + p.file.Size()) // EOF position
		}
	}()
	_ = p.file.Offset(pos) // trigger a panic if position is out-of-range
	return pos
}

// ----------------------------------------------------------------------------
// Identifiers

func (p *parser) parseIdent() *ast.Ident {
	pos := p.pos
	name := "_"
	if p.tok == token.IDENT {
		name = p.lit
		p.next()
	} else {
		p.expect(token.IDENT) // use expect() error handling
	}
	return &ast.Ident{NamePos: pos, Name: name}
}

func (p *parser) parseIdentList() (list []*ast.Ident) {
	if p.trace {
		defer un(trace(p, "IdentList"))
	}

	list = append(list, p.parseIdent())
	for p.tok == token.COMMA {
		p.next()
		list = append(list, p.parseIdent())
	}

	return
}

// ----------------------------------------------------------------------------
// Common productions

// If lhs is set, result list elements which are identifiers are not resolved.
func (p *parser) parseExprList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "ExpressionList"))
	}

	list = append(list, p.checkExpr(p.parseExpr()))
	for p.tok == token.COMMA {
		p.next()
		list = append(list, p.checkExpr(p.parseExpr()))
	}

	return
}

func (p *parser) parseList(inRhs bool) []ast.Expr {
	old := p.inRhs
	p.inRhs = inRhs
	list := p.parseExprList()
	p.inRhs = old
	return list
}

// ----------------------------------------------------------------------------
// Types

func (p *parser) parseType() ast.Expr {
	if p.trace {
		defer un(trace(p, "Type"))
	}

	typ := p.tryIdentOrType()

	if typ == nil {
		pos := p.pos
		p.errorExpected(pos, "type")
		p.advance(exprEnd)
		return &ast.BadExpr{From: pos, To: p.pos}
	}

	return typ
}

func (p *parser) parseQualifiedIdent(ident *ast.Ident) ast.Expr {
	if p.trace {
		defer un(trace(p, "QualifiedIdent"))
	}

	typ := p.parseTypeName(ident)
	if p.tok == token.LBRACK && p.allowGenerics() {
		typ = p.parseTypeInstance(typ)
	}

	return typ
}

// If the result is an identifier, it is not resolved.
func (p *parser) parseTypeName(ident *ast.Ident) ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeName"))
	}

	if ident == nil {
		ident = p.parseIdent()
	}

	if p.tok == token.PERIOD {
		// ident is a package name
		p.next()
		sel := p.parseIdent()
		return &ast.SelectorExpr{X: ident, Sel: sel}
	}

	return ident
}

// "[" has already been consumed, and lbrack is its position.
// If len != nil it is the already consumed array length.
func (p *parser) parseArrayType(lbrack token.Pos, len ast.Expr) *ast.ArrayType {
	if p.trace {
		defer un(trace(p, "ArrayType"))
	}

	if len == nil {
		p.exprLev++
		// always permit ellipsis for more fault-tolerant parsing
		if p.tok == token.ELLIPSIS {
			len = &ast.Ellipsis{Ellipsis: p.pos}
			p.next()
		} else if p.tok != token.RBRACK {
			len = p.parseRhs()
		}
		p.exprLev--
	}
	if p.tok == token.COMMA {
		// Trailing commas are accepted in type parameter
		// lists but not in array type declarations.
		// Accept for better error handling but complain.
		p.error(p.pos, "unexpected comma; expecting ]")
		p.next()
	}
	p.expect(token.RBRACK)
	elt := p.parseType()
	return &ast.ArrayType{Lbrack: lbrack, Len: len, Elt: elt}
}

func (p *parser) parseArrayFieldOrTypeInstance(x *ast.Ident) (*ast.Ident, ast.Expr) {
	if p.trace {
		defer un(trace(p, "ArrayFieldOrTypeInstance"))
	}

	// TODO(gri) Should we allow a trailing comma in a type argument
	//           list such as T[P,]? (We do in parseTypeInstance).
	lbrack := p.expect(token.LBRACK)
	var args []ast.Expr
	var firstComma token.Pos
	// TODO(rfindley): consider changing parseRhsOrType so that this function variable
	// is not needed.
	argparser := p.parseRhsOrType
	if !p.allowGenerics() {
		argparser = p.parseRhs
	}
	if p.tok != token.RBRACK {
		p.exprLev++
		args = append(args, argparser())
		for p.tok == token.COMMA {
			if !firstComma.IsValid() {
				firstComma = p.pos
			}
			p.next()
			args = append(args, argparser())
		}
		p.exprLev--
	}
	rbrack := p.expect(token.RBRACK)

	if len(args) == 0 {
		// x []E
		elt := p.parseType()
		return x, &ast.ArrayType{Lbrack: lbrack, Elt: elt}
	}

	// x [P]E or x[P]
	if len(args) == 1 {
		elt := p.tryIdentOrType()
		if elt != nil {
			// x [P]E
			return x, &ast.ArrayType{Lbrack: lbrack, Len: args[0], Elt: elt}
		}
		if !p.allowGenerics() {
			p.error(rbrack, "missing element type in array type expression")
			return nil, &ast.BadExpr{From: args[0].Pos(), To: args[0].End()}
		}
	}

	if !p.allowGenerics() {
		p.error(firstComma, "expected ']', found ','")
		return x, &ast.BadExpr{From: args[0].Pos(), To: args[len(args)-1].End()}
	}

	// x[P], x[P1, P2], ...
	return nil, typeparams.PackIndexExpr(x, lbrack, args, rbrack)
}

func (p *parser) parseFieldDecl() *ast.Field {
	if p.trace {
		defer un(trace(p, "FieldDecl"))
	}

	doc := p.leadComment

	var names []*ast.Ident
	var typ ast.Expr
	if p.tok == token.IDENT {
		name := p.parseIdent()
		if p.tok == token.PERIOD || p.tok == token.STRING || p.tok == token.SEMICOLON || p.tok == token.RBRACE {
			// embedded type
			typ = name
			if p.tok == token.PERIOD {
				typ = p.parseQualifiedIdent(name)
			}
		} else {
			// name1, name2, ... T
			names = []*ast.Ident{name}
			for p.tok == token.COMMA {
				p.next()
				names = append(names, p.parseIdent())
			}
			// Careful dance: We don't know if we have an embedded instantiated
			// type T[P1, P2, ...] or a field T of array type []E or [P]E.
			if len(names) == 1 && p.tok == token.LBRACK {
				name, typ = p.parseArrayFieldOrTypeInstance(name)
				if name == nil {
					names = nil
				}
			} else {
				// T P
				typ = p.parseType()
			}
		}
	} else {
		// embedded, possibly generic type
		// (using the enclosing parentheses to distinguish it from a named field declaration)
		// TODO(rFindley) confirm that this doesn't allow parenthesized embedded type
		typ = p.parseType()
	}

	var tag *ast.BasicLit
	if p.tok == token.STRING {
		tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
	}

	p.expectSemi() // call before accessing p.linecomment

	field := &ast.Field{Doc: doc, Names: names, Type: typ, Tag: tag, Comment: p.lineComment}
	return field
}

func (p *parser) parseStructType() *ast.StructType {
	if p.trace {
		defer un(trace(p, "StructType"))
	}

	pos := p.expect(token.STRUCT)
	lbrace := p.expect(token.LBRACE)
	var list []*ast.Field
	for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
		// a field declaration cannot start with a '(' but we accept
		// it here for more robust parsing and better error messages
		// (parseFieldDecl will check and complain if necessary)
		list = append(list, p.parseFieldDecl())
	}
	rbrace := p.expect(token.RBRACE)

	return &ast.StructType{
		Struct: pos,
		Fields: &ast.FieldList{
			Opening: lbrace,
			List:    list,
			Closing: rbrace,
		},
	}
}

func (p *parser) parsePointerType() *ast.StarExpr {
	if p.trace {
		defer un(trace(p, "PointerType"))
	}

	star := p.expect(token.MUL)
	base := p.parseType()

	return &ast.StarExpr{Star: star, X: base}
}

func (p *parser) parseDotsType() *ast.Ellipsis {
	if p.trace {
		defer un(trace(p, "DotsType"))
	}

	pos := p.expect(token.ELLIPSIS)
	elt := p.parseType()

	return &ast.Ellipsis{Ellipsis: pos, Elt: elt}
}

type field struct {
	name *ast.Ident
	typ  ast.Expr
}

func (p *parser) parseParamDecl(name *ast.Ident, typeSetsOK bool) (f field) {
	// TODO(rFindley) refactor to be more similar to paramDeclOrNil in the syntax
	// package
	if p.trace {
		defer un(trace(p, "ParamDeclOrNil"))
	}

	ptok := p.tok
	if name != nil {
		p.tok = token.IDENT // force token.IDENT case in switch below
	} else if typeSetsOK && p.tok == token.TILDE {
		// "~" ...
		return field{nil, p.embeddedElem(nil)}
	}

	switch p.tok {
	case token.IDENT:
		// name
		if name != nil {
			f.name = name
			p.tok = ptok
		} else {
			f.name = p.parseIdent()
		}
		switch p.tok {
		case token.IDENT, token.MUL, token.ARROW, token.FUNC, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN:
			// name type
			f.typ = p.parseType()

		case token.LBRACK:
			// name "[" type1, ..., typeN "]" or name "[" n "]" type
			f.name, f.typ = p.parseArrayFieldOrTypeInstance(f.name)

		case token.ELLIPSIS:
			// name "..." type
			f.typ = p.parseDotsType()
			return // don't allow ...type "|" ...

		case token.PERIOD:
			// name "." ...
			f.typ = p.parseQualifiedIdent(f.name)
			f.name = nil

		case token.TILDE:
			if typeSetsOK {
				f.typ = p.embeddedElem(nil)
				return
			}

		case token.OR:
			if typeSetsOK {
				// name "|" typeset
				f.typ = p.embeddedElem(f.name)
				f.name = nil
				return
			}
		}

	case token.MUL, token.ARROW, token.FUNC, token.LBRACK, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN:
		// type
		f.typ = p.parseType()

	case token.ELLIPSIS:
		// "..." type
		// (always accepted)
		f.typ = p.parseDotsType()
		return // don't allow ...type "|" ...

	default:
		// TODO(rfindley): this looks incorrect in the case of type parameter
		// lists.
		p.errorExpected(p.pos, ")")
		p.advance(exprEnd)
	}

	// [name] type "|"
	if typeSetsOK && p.tok == token.OR && f.typ != nil {
		f.typ = p.embeddedElem(f.typ)
	}

	return
}

func (p *parser) parseParameterList(name0 *ast.Ident, typ0 ast.Expr, closing token.Token) (params []*ast.Field) {
	if p.trace {
		defer un(trace(p, "ParameterList"))
	}

	// Type parameters are the only parameter list closed by ']'.
	tparams := closing == token.RBRACK
	// Type set notation is ok in type parameter lists.
	typeSetsOK := tparams && p.allowTypeSets()

	pos := p.pos
	if name0 != nil {
		pos = name0.Pos()
	}

	var list []field
	var named int // number of parameters that have an explicit name and type

	for name0 != nil || p.tok != closing && p.tok != token.EOF {
		var par field
		if typ0 != nil {
			if typeSetsOK {
				typ0 = p.embeddedElem(typ0)
			}
			par = field{name0, typ0}
		} else {
			par = p.parseParamDecl(name0, typeSetsOK)
		}
		name0 = nil // 1st name was consumed if present
		typ0 = nil  // 1st typ was consumed if present
		if par.name != nil || par.typ != nil {
			list = append(list, par)
			if par.name != nil && par.typ != nil {
				named++
			}
		}
		if !p.atComma("parameter list", closing) {
			break
		}
		p.next()
	}

	if len(list) == 0 {
		return // not uncommon
	}

	// TODO(gri) parameter distribution and conversion to []*ast.Field
	//           can be combined and made more efficient

	// distribute parameter types
	if named == 0 {
		// all unnamed => found names are type names
		for i := 0; i < len(list); i++ {
			par := &list[i]
			if typ := par.name; typ != nil {
				par.typ = typ
				par.name = nil
			}
		}
		if tparams {
			p.error(pos, "all type parameters must be named")
		}
	} else if named != len(list) {
		// some named => all must be named
		ok := true
		var typ ast.Expr
		missingName := pos
		for i := len(list) - 1; i >= 0; i-- {
			if par := &list[i]; par.typ != nil {
				typ = par.typ
				if par.name == nil {
					ok = false
					missingName = par.typ.Pos()
					n := ast.NewIdent("_")
					n.NamePos = typ.Pos() // correct position
					par.name = n
				}
			} else if typ != nil {
				par.typ = typ
			} else {
				// par.typ == nil && typ == nil => we only have a par.name
				ok = false
				missingName = par.name.Pos()
				par.typ = &ast.BadExpr{From: par.name.Pos(), To: p.pos}
			}
		}
		if !ok {
			if tparams {
				p.error(missingName, "all type parameters must be named")
			} else {
				p.error(pos, "mixed named and unnamed parameters")
			}
		}
	}

	// convert list []*ast.Field
	if named == 0 {
		// parameter list consists of types only
		for _, par := range list {
			assert(par.typ != nil, "nil type in unnamed parameter list")
			params = append(params, &ast.Field{Type: par.typ})
		}
		return
	}

	// parameter list consists of named parameters with types
	var names []*ast.Ident
	var typ ast.Expr
	addParams := func() {
		assert(typ != nil, "nil type in named parameter list")
		field := &ast.Field{Names: names, Type: typ}
		params = append(params, field)
		names = nil
	}
	for _, par := range list {
		if par.typ != typ {
			if len(names) > 0 {
				addParams()
			}
			typ = par.typ
		}
		names = append(names, par.name)
	}
	if len(names) > 0 {
		addParams()
	}
	return
}

func (p *parser) parseParameters(acceptTParams bool) (tparams, params *ast.FieldList) {
	if p.trace {
		defer un(trace(p, "Parameters"))
	}

	if p.allowGenerics() && acceptTParams && p.tok == token.LBRACK {
		opening := p.pos
		p.next()
		// [T any](params) syntax
		list := p.parseParameterList(nil, nil, token.RBRACK)
		rbrack := p.expect(token.RBRACK)
		tparams = &ast.FieldList{Opening: opening, List: list, Closing: rbrack}
		// Type parameter lists must not be empty.
		if tparams.NumFields() == 0 {
			p.error(tparams.Closing, "empty type parameter list")
			tparams = nil // avoid follow-on errors
		}
	}

	opening := p.expect(token.LPAREN)

	var fields []*ast.Field
	if p.tok != token.RPAREN {
		fields = p.parseParameterList(nil, nil, token.RPAREN)
	}

	rparen := p.expect(token.RPAREN)
	params = &ast.FieldList{Opening: opening, List: fields, Closing: rparen}

	return
}

func (p *parser) parseResult() *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Result"))
	}

	if p.tok == token.LPAREN {
		_, results := p.parseParameters(false)
		return results
	}

	typ := p.tryIdentOrType()
	if typ != nil {
		list := make([]*ast.Field, 1)
		list[0] = &ast.Field{Type: typ}
		return &ast.FieldList{List: list}
	}

	return nil
}

func (p *parser) parseFuncType() *ast.FuncType {
	if p.trace {
		defer un(trace(p, "FuncType"))
	}

	pos := p.expect(token.FUNC)
	tparams, params := p.parseParameters(true)
	if tparams != nil {
		p.error(tparams.Pos(), "function type must have no type parameters")
	}
	results := p.parseResult()

	return &ast.FuncType{Func: pos, Params: params, Results: results}
}

func (p *parser) parseMethodSpec() *ast.Field {
	if p.trace {
		defer un(trace(p, "MethodSpec"))
	}

	doc := p.leadComment
	var idents []*ast.Ident
	var typ ast.Expr
	x := p.parseTypeName(nil)
	if ident, _ := x.(*ast.Ident); ident != nil {
		switch {
		case p.tok == token.LBRACK && p.allowGenerics():
			// generic method or embedded instantiated type
			lbrack := p.pos
			p.next()
			p.exprLev++
			x := p.parseExpr()
			p.exprLev--
			if name0, _ := x.(*ast.Ident); name0 != nil && p.tok != token.COMMA && p.tok != token.RBRACK {
				// generic method m[T any]
				//
				// Interface methods do not have type parameters. We parse them for a
				// better error message and improved error recovery.
				_ = p.parseParameterList(name0, nil, token.RBRACK)
				_ = p.expect(token.RBRACK)
				p.error(lbrack, "interface method must have no type parameters")

				// TODO(rfindley) refactor to share code with parseFuncType.
				_, params := p.parseParameters(false)
				results := p.parseResult()
				idents = []*ast.Ident{ident}
				typ = &ast.FuncType{
					Func:    token.NoPos,
					Params:  params,
					Results: results,
				}
			} else {
				// embedded instantiated type
				// TODO(rfindley) should resolve all identifiers in x.
				list := []ast.Expr{x}
				if p.atComma("type argument list", token.RBRACK) {
					p.exprLev++
					p.next()
					for p.tok != token.RBRACK && p.tok != token.EOF {
						list = append(list, p.parseType())
						if !p.atComma("type argument list", token.RBRACK) {
							break
						}
						p.next()
					}
					p.exprLev--
				}
				rbrack := p.expectClosing(token.RBRACK, "type argument list")
				typ = typeparams.PackIndexExpr(ident, lbrack, list, rbrack)
			}
		case p.tok == token.LPAREN:
			// ordinary method
			// TODO(rfindley) refactor to share code with parseFuncType.
			_, params := p.parseParameters(false)
			results := p.parseResult()
			idents = []*ast.Ident{ident}
			typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
		default:
			// embedded type
			typ = x
		}
	} else {
		// embedded, possibly instantiated type
		typ = x
		if p.tok == token.LBRACK && p.allowGenerics() {
			// embedded instantiated interface
			typ = p.parseTypeInstance(typ)
		}
	}

	// Comment is added at the callsite: the field below may joined with
	// additional type specs using '|'.
	// TODO(rfindley) this should be refactored.
	// TODO(rfindley) add more tests for comment handling.
	return &ast.Field{Doc: doc, Names: idents, Type: typ}
}

func (p *parser) embeddedElem(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "EmbeddedElem"))
	}
	if x == nil {
		x = p.embeddedTerm()
	}
	for p.tok == token.OR {
		t := new(ast.BinaryExpr)
		t.OpPos = p.pos
		t.Op = token.OR
		p.next()
		t.X = x
		t.Y = p.embeddedTerm()
		x = t
	}
	return x
}

func (p *parser) embeddedTerm() ast.Expr {
	if p.trace {
		defer un(trace(p, "EmbeddedTerm"))
	}
	if p.tok == token.TILDE {
		t := new(ast.UnaryExpr)
		t.OpPos = p.pos
		t.Op = token.TILDE
		p.next()
		t.X = p.parseType()
		return t
	}

	t := p.tryIdentOrType()
	if t == nil {
		pos := p.pos
		p.errorExpected(pos, "~ term or type")
		p.advance(exprEnd)
		return &ast.BadExpr{From: pos, To: p.pos}
	}

	return t
}

func (p *parser) parseInterfaceType() *ast.InterfaceType {
	if p.trace {
		defer un(trace(p, "InterfaceType"))
	}

	pos := p.expect(token.INTERFACE)
	lbrace := p.expect(token.LBRACE)

	var list []*ast.Field

parseElements:
	for {
		switch {
		case p.tok == token.IDENT:
			f := p.parseMethodSpec()
			if f.Names == nil && p.allowGenerics() {
				f.Type = p.embeddedElem(f.Type)
			}
			p.expectSemi()
			f.Comment = p.lineComment
			list = append(list, f)
		case p.tok == token.TILDE && p.allowGenerics():
			typ := p.embeddedElem(nil)
			p.expectSemi()
			comment := p.lineComment
			list = append(list, &ast.Field{Type: typ, Comment: comment})
		case p.allowGenerics():
			if t := p.tryIdentOrType(); t != nil {
				typ := p.embeddedElem(t)
				p.expectSemi()
				comment := p.lineComment
				list = append(list, &ast.Field{Type: typ, Comment: comment})
			} else {
				break parseElements
			}
		default:
			break parseElements
		}
	}

	// TODO(rfindley): the error produced here could be improved, since we could
	// accept a identifier, 'type', or a '}' at this point.
	rbrace := p.expect(token.RBRACE)

	return &ast.InterfaceType{
		Interface: pos,
		Methods: &ast.FieldList{
			Opening: lbrace,
			List:    list,
			Closing: rbrace,
		},
	}
}

func (p *parser) parseMapType() *ast.MapType {
	if p.trace {
		defer un(trace(p, "MapType"))
	}

	pos := p.expect(token.MAP)
	p.expect(token.LBRACK)
	key := p.parseType()
	p.expect(token.RBRACK)
	value := p.parseType()

	return &ast.MapType{Map: pos, Key: key, Value: value}
}

func (p *parser) parseChanType() *ast.ChanType {
	if p.trace {
		defer un(trace(p, "ChanType"))
	}

	pos := p.pos
	dir := ast.SEND | ast.RECV
	var arrow token.Pos
	if p.tok == token.CHAN {
		p.next()
		if p.tok == token.ARROW {
			arrow = p.pos
			p.next()
			dir = ast.SEND
		}
	} else {
		arrow = p.expect(token.ARROW)
		p.expect(token.CHAN)
		dir = ast.RECV
	}
	value := p.parseType()

	return &ast.ChanType{Begin: pos, Arrow: arrow, Dir: dir, Value: value}
}

func (p *parser) parseTypeInstance(typ ast.Expr) ast.Expr {
	assert(p.allowGenerics(), "parseTypeInstance while not parsing type params")
	if p.trace {
		defer un(trace(p, "TypeInstance"))
	}

	opening := p.expect(token.LBRACK)
	p.exprLev++
	var list []ast.Expr
	for p.tok != token.RBRACK && p.tok != token.EOF {
		list = append(list, p.parseType())
		if !p.atComma("type argument list", token.RBRACK) {
			break
		}
		p.next()
	}
	p.exprLev--

	closing := p.expectClosing(token.RBRACK, "type argument list")

	if len(list) == 0 {
		p.errorExpected(closing, "type argument list")
		return &ast.IndexExpr{
			X:      typ,
			Lbrack: opening,
			Index:  &ast.BadExpr{From: opening + 1, To: closing},
			Rbrack: closing,
		}
	}

	return typeparams.PackIndexExpr(typ, opening, list, closing)
}

func (p *parser) tryIdentOrType() ast.Expr {
	switch p.tok {
	case token.IDENT:
		typ := p.parseTypeName(nil)
		if p.tok == token.LBRACK && p.allowGenerics() {
			typ = p.parseTypeInstance(typ)
		}
		return typ
	case token.LBRACK:
		lbrack := p.expect(token.LBRACK)
		return p.parseArrayType(lbrack, nil)
	case token.STRUCT:
		return p.parseStructType()
	case token.MUL:
		return p.parsePointerType()
	case token.FUNC:
		typ := p.parseFuncType()
		return typ
	case token.INTERFACE:
		return p.parseInterfaceType()
	case token.MAP:
		return p.parseMapType()
	case token.CHAN, token.ARROW:
		return p.parseChanType()
	case token.LPAREN:
		lparen := p.pos
		p.next()
		typ := p.parseType()
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
	}

	// no type found
	return nil
}

// ----------------------------------------------------------------------------
// Blocks

func (p *parser) parseStmtList() (list []ast.Stmt) {
	if p.trace {
		defer un(trace(p, "StatementList"))
	}

	for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
		list = append(list, p.parseStmt())
	}

	return
}

func (p *parser) parseBody() *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "Body"))
	}

	lbrace := p.expect(token.LBRACE)
	list := p.parseStmtList()
	rbrace := p.expect2(token.RBRACE)

	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}

func (p *parser) parseBlockStmt() *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "BlockStmt"))
	}

	lbrace := p.expect(token.LBRACE)
	list := p.parseStmtList()
	rbrace := p.expect2(token.RBRACE)

	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}

// ----------------------------------------------------------------------------
// Expressions

func (p *parser) parseFuncTypeOrLit() ast.Expr {
	if p.trace {
		defer un(trace(p, "FuncTypeOrLit"))
	}

	typ := p.parseFuncType()
	if p.tok != token.LBRACE {
		// function type only
		return typ
	}

	p.exprLev++
	body := p.parseBody()
	p.exprLev--

	return &ast.FuncLit{Type: typ, Body: body}
}

// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T. Callers must verify the result.
func (p *parser) parseOperand() ast.Expr {
	if p.trace {
		defer un(trace(p, "Operand"))
	}

	switch p.tok {
	case token.IDENT:
		x := p.parseIdent()
		return x

	case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
		return x

	case token.LPAREN:
		lparen := p.pos
		p.next()
		p.exprLev++
		x := p.parseRhsOrType() // types may be parenthesized: (some type)
		p.exprLev--
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}

	case token.FUNC:
		return p.parseFuncTypeOrLit()
	}

	if typ := p.tryIdentOrType(); typ != nil { // do not consume trailing type parameters
		// could be type for composite literal or conversion
		_, isIdent := typ.(*ast.Ident)
		assert(!isIdent, "type cannot be identifier")
		return typ
	}

	// we have an error
	pos := p.pos
	p.errorExpected(pos, "operand")
	p.advance(stmtStart)
	return &ast.BadExpr{From: pos, To: p.pos}
}

func (p *parser) parseSelector(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "Selector"))
	}

	sel := p.parseIdent()

	return &ast.SelectorExpr{X: x, Sel: sel}
}

func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeAssertion"))
	}

	lparen := p.expect(token.LPAREN)
	var typ ast.Expr
	if p.tok == token.TYPE {
		// type switch: typ == nil
		p.next()
	} else {
		typ = p.parseType()
	}
	rparen := p.expect(token.RPAREN)

	return &ast.TypeAssertExpr{X: x, Type: typ, Lparen: lparen, Rparen: rparen}
}

func (p *parser) parseIndexOrSliceOrInstance(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "parseIndexOrSliceOrInstance"))
	}

	lbrack := p.expect(token.LBRACK)
	if p.tok == token.RBRACK {
		// empty index, slice or index expressions are not permitted;
		// accept them for parsing tolerance, but complain
		p.errorExpected(p.pos, "operand")
		rbrack := p.pos
		p.next()
		return &ast.IndexExpr{
			X:      x,
			Lbrack: lbrack,
			Index:  &ast.BadExpr{From: rbrack, To: rbrack},
			Rbrack: rbrack,
		}
	}
	p.exprLev++

	const N = 3 // change the 3 to 2 to disable 3-index slices
	var args []ast.Expr
	var index [N]ast.Expr
	var colons [N - 1]token.Pos
	var firstComma token.Pos
	if p.tok != token.COLON {
		// We can't know if we have an index expression or a type instantiation;
		// so even if we see a (named) type we are not going to be in type context.
		index[0] = p.parseRhsOrType()
	}
	ncolons := 0
	switch p.tok {
	case token.COLON:
		// slice expression
		for p.tok == token.COLON && ncolons < len(colons) {
			colons[ncolons] = p.pos
			ncolons++
			p.next()
			if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
				index[ncolons] = p.parseRhs()
			}
		}
	case token.COMMA:
		firstComma = p.pos
		// instance expression
		args = append(args, index[0])
		for p.tok == token.COMMA {
			p.next()
			if p.tok != token.RBRACK && p.tok != token.EOF {
				args = append(args, p.parseType())
			}
		}
	}

	p.exprLev--
	rbrack := p.expect(token.RBRACK)

	if ncolons > 0 {
		// slice expression
		slice3 := false
		if ncolons == 2 {
			slice3 = true
			// Check presence of 2nd and 3rd index here rather than during type-checking
			// to prevent erroneous programs from passing through gofmt (was issue 7305).
			if index[1] == nil {
				p.error(colons[0], "2nd index required in 3-index slice")
				index[1] = &ast.BadExpr{From: colons[0] + 1, To: colons[1]}
			}
			if index[2] == nil {
				p.error(colons[1], "3rd index required in 3-index slice")
				index[2] = &ast.BadExpr{From: colons[1] + 1, To: rbrack}
			}
		}
		return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: index[0], High: index[1], Max: index[2], Slice3: slice3, Rbrack: rbrack}
	}

	if len(args) == 0 {
		// index expression
		return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: index[0], Rbrack: rbrack}
	}

	if !p.allowGenerics() {
		p.error(firstComma, "expected ']' or ':', found ','")
		return &ast.BadExpr{From: args[0].Pos(), To: args[len(args)-1].End()}
	}

	// instance expression
	return typeparams.PackIndexExpr(x, lbrack, args, rbrack)
}

func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}

	lparen := p.expect(token.LPAREN)
	p.exprLev++
	var list []ast.Expr
	var ellipsis token.Pos
	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
		list = append(list, p.parseRhsOrType()) // builtins may expect a type: make(some type, ...)
if .tok == token.ELLIPSIS { = .pos .next() } if !.atComma("argument list", token.RPAREN) { break } .next() } .exprLev-- := .expectClosing(token.RPAREN, "argument list") return &ast.CallExpr{Fun: , Lparen: , Args: , Ellipsis: , Rparen: } } func ( *parser) () ast.Expr { if .trace { defer un(trace(, "Element")) } if .tok == token.LBRACE { return .parseLiteralValue(nil) } := .checkExpr(.parseExpr()) return } func ( *parser) () ast.Expr { if .trace { defer un(trace(, "Element")) } := .parseValue() if .tok == token.COLON { := .pos .next() = &ast.KeyValueExpr{Key: , Colon: , Value: .parseValue()} } return } func ( *parser) () ( []ast.Expr) { if .trace { defer un(trace(, "ElementList")) } for .tok != token.RBRACE && .tok != token.EOF { = append(, .parseElement()) if !.atComma("composite literal", token.RBRACE) { break } .next() } return } func ( *parser) ( ast.Expr) ast.Expr { if .trace { defer un(trace(, "LiteralValue")) } := .expect(token.LBRACE) var []ast.Expr .exprLev++ if .tok != token.RBRACE { = .parseElementList() } .exprLev-- := .expectClosing(token.RBRACE, "composite literal") return &ast.CompositeLit{Type: , Lbrace: , Elts: , Rbrace: } } // checkExpr checks that x is an expression (and not a type). func ( *parser) ( ast.Expr) ast.Expr { switch unparen().(type) { case *ast.BadExpr: case *ast.Ident: case *ast.BasicLit: case *ast.FuncLit: case *ast.CompositeLit: case *ast.ParenExpr: panic("unreachable") case *ast.SelectorExpr: case *ast.IndexExpr: case *ast.IndexListExpr: case *ast.SliceExpr: case *ast.TypeAssertExpr: // If t.Type == nil we have a type assertion of the form // y.(type), which is only allowed in type switch expressions. // It's hard to exclude those but for the case where we are in // a type switch. Instead be lenient and test this in the type // checker. 
case *ast.CallExpr: case *ast.StarExpr: case *ast.UnaryExpr: case *ast.BinaryExpr: default: // all other nodes are not proper expressions .errorExpected(.Pos(), "expression") = &ast.BadExpr{From: .Pos(), To: .safePos(.End())} } return } // If x is of the form (T), unparen returns unparen(T), otherwise it returns x. func ( ast.Expr) ast.Expr { if , := .(*ast.ParenExpr); { = (.X) } return } // checkExprOrType checks that x is an expression or a type // (and not a raw type such as [...]T). // func ( *parser) ( ast.Expr) ast.Expr { switch t := unparen().(type) { case *ast.ParenExpr: panic("unreachable") case *ast.ArrayType: if , := .Len.(*ast.Ellipsis); { .error(.Pos(), "expected array length, found '...'") = &ast.BadExpr{From: .Pos(), To: .safePos(.End())} } } // all other nodes are expressions or types return } func ( *parser) ( ast.Expr) ast.Expr { if .trace { defer un(trace(, "PrimaryExpr")) } if == nil { = .parseOperand() } for { switch .tok { case token.PERIOD: .next() switch .tok { case token.IDENT: = .parseSelector(.checkExprOrType()) case token.LPAREN: = .parseTypeAssertion(.checkExpr()) default: := .pos .errorExpected(, "selector or type assertion") // TODO(rFindley) The check for token.RBRACE below is a targeted fix // to error recovery sufficient to make the x/tools tests to // pass with the new parsing logic introduced for type // parameters. Remove this once error recovery has been // more generally reconsidered. 
if .tok != token.RBRACE { .next() // make progress } := &ast.Ident{NamePos: , Name: "_"} = &ast.SelectorExpr{X: , Sel: } } case token.LBRACK: = .parseIndexOrSliceOrInstance(.checkExpr()) case token.LPAREN: = .parseCallOrConversion(.checkExprOrType()) case token.LBRACE: // operand may have returned a parenthesized complit // type; accept it but complain if we have a complit := unparen() // determine if '{' belongs to a composite literal or a block statement switch .(type) { case *ast.BadExpr, *ast.Ident, *ast.SelectorExpr: if .exprLev < 0 { return } // x is possibly a composite literal type case *ast.IndexExpr, *ast.IndexListExpr: if .exprLev < 0 { return } // x is possibly a composite literal type case *ast.ArrayType, *ast.StructType, *ast.MapType: // x is a composite literal type default: return } if != { .error(.Pos(), "cannot parenthesize type in composite literal") // already progressed, no need to advance } = .parseLiteralValue() default: return } } } func ( *parser) () ast.Expr { if .trace { defer un(trace(, "UnaryExpr")) } switch .tok { case token.ADD, token.SUB, token.NOT, token.XOR, token.AND: , := .pos, .tok .next() := .() return &ast.UnaryExpr{OpPos: , Op: , X: .checkExpr()} case token.ARROW: // channel type or receive expression := .pos .next() // If the next token is token.CHAN we still don't know if it // is a channel type or a receive operation - we only know // once we have found the end of the unary expression. 
There // are two cases: // // <- type => (<-type) must be channel type // <- expr => <-(expr) is a receive from an expression // // In the first case, the arrow must be re-associated with // the channel type parsed already: // // <- (chan type) => (<-chan type) // <- (chan<- type) => (<-chan (<-type)) := .() // determine which case we have if , := .(*ast.ChanType); { // (<-type) // re-associate position info and <- := ast.SEND for && == ast.SEND { if .Dir == ast.RECV { // error: (<-type) is (<-(<-chan T)) .errorExpected(.Arrow, "'chan'") } , .Begin, .Arrow = .Arrow, , , .Dir = .Dir, ast.RECV , = .Value.(*ast.ChanType) } if == ast.SEND { .errorExpected(, "channel type") } return } // <-(expr) return &ast.UnaryExpr{OpPos: , Op: token.ARROW, X: .checkExpr()} case token.MUL: // pointer type or unary "*" expression := .pos .next() := .() return &ast.StarExpr{Star: , X: .checkExprOrType()} } return .parsePrimaryExpr(nil) } func ( *parser) () (token.Token, int) { := .tok if .inRhs && == token.ASSIGN { = token.EQL } return , .Precedence() } // parseBinaryExpr parses a (possibly) binary expression. // If x is non-nil, it is used as the left operand. // If check is true, operands are checked to be valid expressions. // // TODO(rfindley): parseBinaryExpr has become overloaded. Consider refactoring. func ( *parser) ( ast.Expr, int, bool) ast.Expr { if .trace { defer un(trace(, "BinaryExpr")) } if == nil { = .parseUnaryExpr() } for { , := .tokPrec() if < { return } := .expect() := .(nil, +1, ) if { = .checkExpr() = .checkExpr() } = &ast.BinaryExpr{X: , OpPos: , Op: , Y: } } } // checkBinaryExpr checks binary expressions that were not already checked by // parseBinaryExpr, because the latter was called with check=false. func ( *parser) ( ast.Expr) { , := .(*ast.BinaryExpr) if ! { return } .X = .checkExpr(.X) .Y = .checkExpr(.Y) // parseBinaryExpr checks x and y for each binary expr in a tree, so we // traverse the tree of binary exprs starting from x. 
.(.X) .(.Y) } // The result may be a type or even a raw type ([...]int). Callers must // check the result (using checkExpr or checkExprOrType), depending on // context. func ( *parser) () ast.Expr { if .trace { defer un(trace(, "Expression")) } return .parseBinaryExpr(nil, token.LowestPrec+1, true) } func ( *parser) () ast.Expr { := .inRhs .inRhs = true := .checkExpr(.parseExpr()) .inRhs = return } func ( *parser) () ast.Expr { := .inRhs .inRhs = true := .checkExprOrType(.parseExpr()) .inRhs = return } // ---------------------------------------------------------------------------- // Statements // Parsing modes for parseSimpleStmt. const ( basic = iota labelOk rangeOk ) // parseSimpleStmt returns true as 2nd result if it parsed the assignment // of a range clause (with mode == rangeOk). The returned statement is an // assignment with a right-hand side that is a single unary expression of // the form "range x". No guarantees are given for the left-hand side. func ( *parser) ( int) (ast.Stmt, bool) { if .trace { defer un(trace(, "SimpleStmt")) } := .parseList(false) switch .tok { case token.DEFINE, token.ASSIGN, token.ADD_ASSIGN, token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN, token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN, token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN: // assignment statement, possibly part of a range clause , := .pos, .tok .next() var []ast.Expr := false if == rangeOk && .tok == token.RANGE && ( == token.DEFINE || == token.ASSIGN) { := .pos .next() = []ast.Expr{&ast.UnaryExpr{OpPos: , Op: token.RANGE, X: .parseRhs()}} = true } else { = .parseList(true) } := &ast.AssignStmt{Lhs: , TokPos: , Tok: , Rhs: } if == token.DEFINE { .checkAssignStmt() } return , } if len() > 1 { .errorExpected([0].Pos(), "1 expression") // continue with first expression } switch .tok { case token.COLON: // labeled statement := .pos .next() if , := [0].(*ast.Ident); == labelOk && { // Go spec: The scope of a label is the body of the 
function // in which it is declared and excludes the body of any nested // function. := &ast.LabeledStmt{Label: , Colon: , Stmt: .parseStmt()} return , false } // The label declaration typically starts at x[0].Pos(), but the label // declaration may be erroneous due to a token after that position (and // before the ':'). If SpuriousErrors is not set, the (only) error // reported for the line is the illegal label error instead of the token // before the ':' that caused the problem. Thus, use the (latest) colon // position for error reporting. .error(, "illegal label declaration") return &ast.BadStmt{From: [0].Pos(), To: + 1}, false case token.ARROW: // send statement := .pos .next() := .parseRhs() return &ast.SendStmt{Chan: [0], Arrow: , Value: }, false case token.INC, token.DEC: // increment or decrement := &ast.IncDecStmt{X: [0], TokPos: .pos, Tok: .tok} .next() return , false } // expression return &ast.ExprStmt{X: [0]}, false } func ( *parser) ( *ast.AssignStmt) { for , := range .Lhs { if , := .(*ast.Ident); ! { .errorExpected(.Pos(), "identifier on left side of :=") } } } func ( *parser) ( string) *ast.CallExpr { := .parseRhsOrType() // could be a conversion: (some type)(x) if , := .(*ast.CallExpr); { return } if , := .(*ast.BadExpr); ! 
{ // only report error if it's a new one .error(.safePos(.End()), fmt.Sprintf("function must be invoked in %s statement", )) } return nil } func ( *parser) () ast.Stmt { if .trace { defer un(trace(, "GoStmt")) } := .expect(token.GO) := .parseCallExpr("go") .expectSemi() if == nil { return &ast.BadStmt{From: , To: + 2} // len("go") } return &ast.GoStmt{Go: , Call: } } func ( *parser) () ast.Stmt { if .trace { defer un(trace(, "DeferStmt")) } := .expect(token.DEFER) := .parseCallExpr("defer") .expectSemi() if == nil { return &ast.BadStmt{From: , To: + 5} // len("defer") } return &ast.DeferStmt{Defer: , Call: } } func ( *parser) () *ast.ReturnStmt { if .trace { defer un(trace(, "ReturnStmt")) } := .pos .expect(token.RETURN) var []ast.Expr if .tok != token.SEMICOLON && .tok != token.RBRACE { = .parseList(true) } .expectSemi() return &ast.ReturnStmt{Return: , Results: } } func ( *parser) ( token.Token) *ast.BranchStmt { if .trace { defer un(trace(, "BranchStmt")) } := .expect() var *ast.Ident if != token.FALLTHROUGH && .tok == token.IDENT { = .parseIdent() } .expectSemi() return &ast.BranchStmt{TokPos: , Tok: , Label: } } func ( *parser) ( ast.Stmt, string) ast.Expr { if == nil { return nil } if , := .(*ast.ExprStmt); { return .checkExpr(.X) } := "simple statement" if , := .(*ast.AssignStmt); { = "assignment" } .error(.Pos(), fmt.Sprintf("expected %s, found %s (missing parentheses around composite literal?)", , )) return &ast.BadExpr{From: .Pos(), To: .safePos(.End())} } // parseIfHeader is an adjusted version of parser.header // in cmd/compile/internal/syntax/parser.go, which has // been tuned for better error handling. 
func ( *parser) () ( ast.Stmt, ast.Expr) { if .tok == token.LBRACE { .error(.pos, "missing condition in if statement") = &ast.BadExpr{From: .pos, To: .pos} return } // p.tok != token.LBRACE := .exprLev .exprLev = -1 if .tok != token.SEMICOLON { // accept potential variable declaration but complain if .tok == token.VAR { .next() .error(.pos, "var declaration not allowed in 'IF' initializer") } , _ = .parseSimpleStmt(basic) } var ast.Stmt var struct { token.Pos string // ";" or "\n"; valid if pos.IsValid() } if .tok != token.LBRACE { if .tok == token.SEMICOLON { . = .pos . = .lit .next() } else { .expect(token.SEMICOLON) } if .tok != token.LBRACE { , _ = .parseSimpleStmt(basic) } } else { = = nil } if != nil { = .makeExpr(, "boolean expression") } else if ..IsValid() { if . == "\n" { .error(., "unexpected newline, expecting { after if clause") } else { .error(., "missing condition in if statement") } } // make sure we have a valid AST if == nil { = &ast.BadExpr{From: .pos, To: .pos} } .exprLev = return } func ( *parser) () *ast.IfStmt { if .trace { defer un(trace(, "IfStmt")) } := .expect(token.IF) , := .parseIfHeader() := .parseBlockStmt() var ast.Stmt if .tok == token.ELSE { .next() switch .tok { case token.IF: = .() case token.LBRACE: = .parseBlockStmt() .expectSemi() default: .errorExpected(.pos, "if statement or block") = &ast.BadStmt{From: .pos, To: .pos} } } else { .expectSemi() } return &ast.IfStmt{If: , Init: , Cond: , Body: , Else: } } func ( *parser) () ( []ast.Expr) { if .trace { defer un(trace(, "TypeList")) } = append(, .parseType()) for .tok == token.COMMA { .next() = append(, .parseType()) } return } func ( *parser) ( bool) *ast.CaseClause { if .trace { defer un(trace(, "CaseClause")) } := .pos var []ast.Expr if .tok == token.CASE { .next() if { = .parseTypeList() } else { = .parseList(true) } } else { .expect(token.DEFAULT) } := .expect(token.COLON) := .parseStmtList() return &ast.CaseClause{Case: , List: , Colon: , Body: } } func ( ast.Expr) bool { 
, := .(*ast.TypeAssertExpr) return && .Type == nil } func ( *parser) ( ast.Stmt) bool { switch t := .(type) { case *ast.ExprStmt: // x.(type) return isTypeSwitchAssert(.X) case *ast.AssignStmt: // v := x.(type) if len(.Lhs) == 1 && len(.Rhs) == 1 && isTypeSwitchAssert(.Rhs[0]) { switch .Tok { case token.ASSIGN: // permit v = x.(type) but complain .error(.TokPos, "expected ':=', found '='") fallthrough case token.DEFINE: return true } } } return false } func ( *parser) () ast.Stmt { if .trace { defer un(trace(, "SwitchStmt")) } := .expect(token.SWITCH) var , ast.Stmt if .tok != token.LBRACE { := .exprLev .exprLev = -1 if .tok != token.SEMICOLON { , _ = .parseSimpleStmt(basic) } if .tok == token.SEMICOLON { .next() = = nil if .tok != token.LBRACE { // A TypeSwitchGuard may declare a variable in addition // to the variable declared in the initial SimpleStmt. // Introduce extra scope to avoid redeclaration errors: // // switch t := 0; t := x.(T) { ... } // // (this code is not valid Go because the first t // cannot be accessed and thus is never used, the extra // scope is needed for the correct error message). // // If we don't have a type switch, s2 must be an expression. // Having the extra nested but empty scope won't affect it. 
, _ = .parseSimpleStmt(basic) } } .exprLev = } := .isTypeSwitchGuard() := .expect(token.LBRACE) var []ast.Stmt for .tok == token.CASE || .tok == token.DEFAULT { = append(, .parseCaseClause()) } := .expect(token.RBRACE) .expectSemi() := &ast.BlockStmt{Lbrace: , List: , Rbrace: } if { return &ast.TypeSwitchStmt{Switch: , Init: , Assign: , Body: } } return &ast.SwitchStmt{Switch: , Init: , Tag: .makeExpr(, "switch expression"), Body: } } func ( *parser) () *ast.CommClause { if .trace { defer un(trace(, "CommClause")) } := .pos var ast.Stmt if .tok == token.CASE { .next() := .parseList(false) if .tok == token.ARROW { // SendStmt if len() > 1 { .errorExpected([0].Pos(), "1 expression") // continue with first expression } := .pos .next() := .parseRhs() = &ast.SendStmt{Chan: [0], Arrow: , Value: } } else { // RecvStmt if := .tok; == token.ASSIGN || == token.DEFINE { // RecvStmt with assignment if len() > 2 { .errorExpected([0].Pos(), "1 or 2 expressions") // continue with first two expressions = [0:2] } := .pos .next() := .parseRhs() := &ast.AssignStmt{Lhs: , TokPos: , Tok: , Rhs: []ast.Expr{}} if == token.DEFINE { .checkAssignStmt() } = } else { // lhs must be single receive operation if len() > 1 { .errorExpected([0].Pos(), "1 expression") // continue with first expression } = &ast.ExprStmt{X: [0]} } } } else { .expect(token.DEFAULT) } := .expect(token.COLON) := .parseStmtList() return &ast.CommClause{Case: , Comm: , Colon: , Body: } } func ( *parser) () *ast.SelectStmt { if .trace { defer un(trace(, "SelectStmt")) } := .expect(token.SELECT) := .expect(token.LBRACE) var []ast.Stmt for .tok == token.CASE || .tok == token.DEFAULT { = append(, .parseCommClause()) } := .expect(token.RBRACE) .expectSemi() := &ast.BlockStmt{Lbrace: , List: , Rbrace: } return &ast.SelectStmt{Select: , Body: } } func ( *parser) () ast.Stmt { if .trace { defer un(trace(, "ForStmt")) } := .expect(token.FOR) var , , ast.Stmt var bool if .tok != token.LBRACE { := .exprLev .exprLev = -1 if .tok != 
token.SEMICOLON { if .tok == token.RANGE { // "for range x" (nil lhs in assignment) := .pos .next() := []ast.Expr{&ast.UnaryExpr{OpPos: , Op: token.RANGE, X: .parseRhs()}} = &ast.AssignStmt{Rhs: } = true } else { , = .parseSimpleStmt(rangeOk) } } if ! && .tok == token.SEMICOLON { .next() = = nil if .tok != token.SEMICOLON { , _ = .parseSimpleStmt(basic) } .expectSemi() if .tok != token.LBRACE { , _ = .parseSimpleStmt(basic) } } .exprLev = } := .parseBlockStmt() .expectSemi() if { := .(*ast.AssignStmt) // check lhs var , ast.Expr switch len(.Lhs) { case 0: // nothing to do case 1: = .Lhs[0] case 2: , = .Lhs[0], .Lhs[1] default: .errorExpected(.Lhs[len(.Lhs)-1].Pos(), "at most 2 expressions") return &ast.BadStmt{From: , To: .safePos(.End())} } // parseSimpleStmt returned a right-hand side that // is a single unary expression of the form "range x" := .Rhs[0].(*ast.UnaryExpr).X return &ast.RangeStmt{ For: , Key: , Value: , TokPos: .TokPos, Tok: .Tok, X: , Body: , } } // regular for statement return &ast.ForStmt{ For: , Init: , Cond: .makeExpr(, "boolean or range expression"), Post: , Body: , } } func ( *parser) () ( ast.Stmt) { if .trace { defer un(trace(, "Statement")) } switch .tok { case token.CONST, token.TYPE, token.VAR: = &ast.DeclStmt{Decl: .parseDecl(stmtStart)} case // tokens that may start an expression token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands token.LBRACK, token.STRUCT, token.MAP, token.CHAN, token.INTERFACE, // composite types token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators , _ = .parseSimpleStmt(labelOk) // because of the required look-ahead, labeled statements are // parsed by parseSimpleStmt - don't expect a semicolon after // them if , := .(*ast.LabeledStmt); ! 
{ .expectSemi() } case token.GO: = .parseGoStmt() case token.DEFER: = .parseDeferStmt() case token.RETURN: = .parseReturnStmt() case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH: = .parseBranchStmt(.tok) case token.LBRACE: = .parseBlockStmt() .expectSemi() case token.IF: = .parseIfStmt() case token.SWITCH: = .parseSwitchStmt() case token.SELECT: = .parseSelectStmt() case token.FOR: = .parseForStmt() case token.SEMICOLON: // Is it ever possible to have an implicit semicolon // producing an empty statement in a valid program? // (handle correctly anyway) = &ast.EmptyStmt{Semicolon: .pos, Implicit: .lit == "\n"} .next() case token.RBRACE: // a semicolon may be omitted before a closing "}" = &ast.EmptyStmt{Semicolon: .pos, Implicit: true} default: // no statement found := .pos .errorExpected(, "statement") .advance(stmtStart) = &ast.BadStmt{From: , To: .pos} } return } // ---------------------------------------------------------------------------- // Declarations type parseSpecFunction func(doc *ast.CommentGroup, pos token.Pos, keyword token.Token, iota int) ast.Spec func ( string) bool { const = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD" , := strconv.Unquote() // go/scanner returns a legal string literal for , := range { if !unicode.IsGraphic() || unicode.IsSpace() || strings.ContainsRune(, ) { return false } } return != "" } func ( *parser) ( *ast.CommentGroup, token.Pos, token.Token, int) ast.Spec { if .trace { defer un(trace(, "ImportSpec")) } var *ast.Ident switch .tok { case token.PERIOD: = &ast.Ident{NamePos: .pos, Name: "."} .next() case token.IDENT: = .parseIdent() } := .pos var string if .tok == token.STRING { = .lit if !isValidImport() { .error(, "invalid import path: "+) } .next() } else { .expect(token.STRING) // use expect() error handling } .expectSemi() // call before accessing p.linecomment // collect imports := &ast.ImportSpec{ Doc: , Name: , Path: &ast.BasicLit{ValuePos: , Kind: token.STRING, Value: }, Comment: .lineComment, } .imports = 
append(.imports, ) return } func ( *parser) ( *ast.CommentGroup, token.Pos, token.Token, int) ast.Spec { if .trace { defer un(trace(, .String()+"Spec")) } := .pos := .parseIdentList() := .tryIdentOrType() var []ast.Expr // always permit optional initialization for more tolerant parsing if .tok == token.ASSIGN { .next() = .parseList(true) } .expectSemi() // call before accessing p.linecomment switch { case token.VAR: if == nil && == nil { .error(, "missing variable type or initialization") } case token.CONST: if == nil && ( == 0 || != nil) { .error(, "missing constant value") } } := &ast.ValueSpec{ Doc: , Names: , Type: , Values: , Comment: .lineComment, } return } func ( *parser) ( *ast.TypeSpec, token.Pos, *ast.Ident, ast.Expr) { if .trace { defer un(trace(, "parseGenericType")) } := .parseParameterList(, , token.RBRACK) := .expect(token.RBRACK) .TypeParams = &ast.FieldList{Opening: , List: , Closing: } // Let the type checker decide whether to accept type parameters on aliases: // see issue #46477. if .tok == token.ASSIGN { // type alias .Assign = .pos .next() } .Type = .parseType() } func ( *parser) ( *ast.CommentGroup, token.Pos, token.Token, int) ast.Spec { if .trace { defer un(trace(, "TypeSpec")) } := .parseIdent() := &ast.TypeSpec{Doc: , Name: } if .tok == token.LBRACK && .allowGenerics() { := .pos .next() if .tok == token.IDENT { // We may have an array type or a type parameter list. // In either case we expect an expression x (which may // just be a name, or a more complex expression) which // we can analyze further. // // A type parameter list may have a type bound starting // with a "[" as in: P []E. In that case, simply parsing // an expression would lead to an error: P[] is invalid. // But since index or slice expressions are never constant // and thus invalid array length expressions, if we see a // "[" following a name it must be the start of an array // or slice constraint. Only if we don't see a "[" do we // need to parse a full expression. 
// Index or slice expressions are never constant and thus invalid // array length expressions. Thus, if we see a "[" following name // we can safely assume that "[" name starts a type parameter list. var ast.Expr = .parseIdent() if .tok != token.LBRACK { // To parse the expression starting with name, expand // the call sequence we would get by passing in name // to parser.expr, and pass in name to parsePrimaryExpr. .exprLev++ := .parsePrimaryExpr() = .parseBinaryExpr(, token.LowestPrec+1, false) .exprLev-- } // analyze the cases var *ast.Ident // pname != nil means pname is the type parameter name var ast.Expr // ptype != nil means ptype is the type parameter type; pname != nil in this case switch t := .(type) { case *ast.Ident: // Unless we see a "]", we are at the start of a type parameter list. if .tok != token.RBRACK { // d.Name "[" name ... = // no ptype } case *ast.BinaryExpr: // If we have an expression of the form name*T, and T is a (possibly // parenthesized) type literal or the next token is a comma, we are // at the start of a type parameter list. if , := .X.(*ast.Ident); != nil { if .Op == token.MUL && (isTypeLit(.Y) || .tok == token.COMMA) { // d.Name "[" name "*" t.Y // d.Name "[" name "*" t.Y "," // convert t into unary *t.Y = = &ast.StarExpr{Star: .OpPos, X: .Y} } } if == nil { // A normal binary expression. Since we passed check=false, we must // now check its operands. .checkBinaryExpr() } case *ast.CallExpr: // If we have an expression of the form name(T), and T is a (possibly // parenthesized) type literal or the next token is a comma, we are // at the start of a type parameter list. if , := .Fun.(*ast.Ident); != nil { if len(.Args) == 1 && !.Ellipsis.IsValid() && (isTypeLit(.Args[0]) || .tok == token.COMMA) { // d.Name "[" name "(" t.ArgList[0] ")" // d.Name "[" name "(" t.ArgList[0] ")" "," = = .Args[0] } } } if != nil { // d.Name "[" pname ... // d.Name "[" pname ptype ... // d.Name "[" pname ptype "," ... 
.parseGenericType(, , , ) } else { // d.Name "[" x ... .Type = .parseArrayType(, ) } } else { // array type .Type = .parseArrayType(, nil) } } else { // no type parameters if .tok == token.ASSIGN { // type alias .Assign = .pos .next() } .Type = .parseType() } .expectSemi() // call before accessing p.linecomment .Comment = .lineComment return } // isTypeLit reports whether x is a (possibly parenthesized) type literal. func ( ast.Expr) bool { switch x := .(type) { case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType: return true case *ast.StarExpr: // *T may be a pointer dereferenciation. // Only consider *T as type literal if T is a type literal. return (.X) case *ast.ParenExpr: return (.X) } return false } func ( *parser) ( token.Token, parseSpecFunction) *ast.GenDecl { if .trace { defer un(trace(, "GenDecl("+.String()+")")) } := .leadComment := .expect() var , token.Pos var []ast.Spec if .tok == token.LPAREN { = .pos .next() for := 0; .tok != token.RPAREN && .tok != token.EOF; ++ { = append(, (.leadComment, , , )) } = .expect(token.RPAREN) .expectSemi() } else { = append(, (nil, , , 0)) } return &ast.GenDecl{ Doc: , TokPos: , Tok: , Lparen: , Specs: , Rparen: , } } func ( *parser) () *ast.FuncDecl { if .trace { defer un(trace(, "FunctionDecl")) } := .leadComment := .expect(token.FUNC) var *ast.FieldList if .tok == token.LPAREN { _, = .parseParameters(false) } := .parseIdent() , := .parseParameters(true) if != nil && != nil { // Method declarations do not have type parameters. We parse them for a // better error message and improved error recovery. 
.error(.Opening, "method must have no type parameters") = nil } := .parseResult() var *ast.BlockStmt switch .tok { case token.LBRACE: = .parseBody() .expectSemi() case token.SEMICOLON: .next() if .tok == token.LBRACE { // opening { of function declaration on next line .error(.pos, "unexpected semicolon or newline before {") = .parseBody() .expectSemi() } default: .expectSemi() } := &ast.FuncDecl{ Doc: , Recv: , Name: , Type: &ast.FuncType{ Func: , TypeParams: , Params: , Results: , }, Body: , } return } func ( *parser) ( map[token.Token]bool) ast.Decl { if .trace { defer un(trace(, "Declaration")) } var parseSpecFunction switch .tok { case token.CONST, token.VAR: = .parseValueSpec case token.TYPE: = .parseTypeSpec case token.FUNC: return .parseFuncDecl() default: := .pos .errorExpected(, "declaration") .advance() return &ast.BadDecl{From: , To: .pos} } return .parseGenDecl(.tok, ) } // ---------------------------------------------------------------------------- // Source files func ( *parser) () *ast.File { if .trace { defer un(trace(, "File")) } // Don't bother parsing the rest if we had errors scanning the first token. // Likely not a Go source file at all. if .errors.Len() != 0 { return nil } // package clause := .leadComment := .expect(token.PACKAGE) // Go spec: The package clause is not a declaration; // the package name does not appear in any scope. := .parseIdent() if .Name == "_" && .mode&DeclarationErrors != 0 { .error(.pos, "invalid package name _") } .expectSemi() // Don't bother parsing the rest if we had errors parsing the package clause. // Likely not a Go source file at all. 
if .errors.Len() != 0 { return nil } var []ast.Decl if .mode&PackageClauseOnly == 0 { // import decls for .tok == token.IMPORT { = append(, .parseGenDecl(token.IMPORT, .parseImportSpec)) } if .mode&ImportsOnly == 0 { // rest of package body for .tok != token.EOF { = append(, .parseDecl(declStart)) } } } := &ast.File{ Doc: , Package: , Name: , Decls: , Imports: .imports, Comments: .comments, } var func(token.Pos, string) if .mode&DeclarationErrors != 0 { = .error } if .mode&SkipObjectResolution == 0 { resolveFile(, .file, ) } return }